
Merge branch 'master' of https://github.com/haverland/AI-on-the-edge-device

Frank Haverland 3 years ago
Parent
Commit
086aa3134b
100 changed files with 3120 additions and 736 deletions
  1. +106 -0  .github/workflows/build.yaml
  2. +2 -0  FeatureRequest.md
  3. +35 -3  README.md
  4. +3 -1  code/components/esp-nn/CMakeLists.txt
  5. +4 -4  code/components/esp-nn/Kconfig.projbuild
  6. +4 -3  code/components/esp-nn/README.md
  7. +5 -5  code/components/esp-nn/include/esp_nn.h
  8. +1 -0  code/components/esp-nn/include/esp_nn_ansi_c.h
  9. +82 -56  code/components/esp-nn/include/esp_nn_ansi_headers.h
  10. +83 -0  code/components/esp-nn/include/esp_nn_defs.h
  11. +24 -54  code/components/esp-nn/include/esp_nn_esp32s3.h
  12. +9 -10  code/components/esp-nn/include/esp_nn_generic_opt.h
  13. +50 -13  code/components/esp-nn/src/common/common_functions.h
  14. +30 -26  code/components/esp-nn/src/convolution/esp_nn_conv_ansi.c
  15. +106 -79  code/components/esp-nn/src/convolution/esp_nn_conv_esp32s3.c
  16. +179 -0  code/components/esp-nn/src/convolution/esp_nn_conv_opt.c
  17. +30 -27  code/components/esp-nn/src/convolution/esp_nn_depthwise_conv_ansi.c
  18. +291 -0  code/components/esp-nn/src/convolution/esp_nn_depthwise_conv_opt.c
  19. +97 -37  code/components/esp-nn/src/convolution/esp_nn_depthwise_conv_s8_esp32s3.c
  20. +8 -0  code/components/esp-nn/test_app/sdkconfig.defaults.esp32s3
  21. +16 -4  code/components/esp-nn/tests/src/basic_math_test.c
  22. +70 -36  code/components/esp-nn/tests/src/convolution_test.c
  23. BIN  code/components/esp-nn_20220724.zip
  24. BIN  code/components/esp-nn_20220827.zip
  25. BIN  code/components/esp32-camera-master.zip
  26. BIN  code/components/esp32-camera-master_20220724.zip
  27. +7 -1  code/components/jomjol_controlcamera/ClassControllCamera.cpp
  28. +95 -21  code/components/jomjol_fileserver_ota/server_file.cpp
  29. +2 -0  code/components/jomjol_fileserver_ota/server_file.h
  30. +3 -0  code/components/jomjol_fileserver_ota/server_help.cpp
  31. +99 -22  code/components/jomjol_fileserver_ota/server_ota.cpp
  32. +186 -118  code/components/jomjol_flowcontroll/ClassFlowCNNGeneral.cpp
  33. +13 -6  code/components/jomjol_flowcontroll/ClassFlowCNNGeneral.h
  34. +3 -0  code/components/jomjol_flowcontroll/ClassFlowControll.cpp
  35. +4 -4  code/components/jomjol_flowcontroll/ClassFlowDefineTypes.h
  36. +5 -0  code/components/jomjol_flowcontroll/ClassFlowImage.cpp
  37. +93 -10  code/components/jomjol_flowcontroll/ClassFlowMQTT.cpp
  38. +1 -0  code/components/jomjol_flowcontroll/ClassFlowMQTT.h
  39. +68 -24  code/components/jomjol_flowcontroll/ClassFlowPostProcessing.cpp
  40. +4 -4  code/components/jomjol_flowcontroll/ClassFlowPostProcessing.h
  41. +47 -2  code/components/jomjol_helper/Helper.cpp
  42. +3 -0  code/components/jomjol_helper/Helper.h
  43. +2 -1  code/components/jomjol_logfile/ClassLogFile.cpp
  44. +104 -15  code/components/jomjol_mqtt/interface_mqtt.cpp
  45. +2 -2  code/components/jomjol_mqtt/interface_mqtt.h
  46. +66 -1  code/components/jomjol_tfliteclass/server_tflite.cpp
  47. +6 -1  code/components/tflite-lib/CMakeLists.txt
  48. +6 -0  code/components/tflite-lib/tensorflow/lite/builtin_ops.h
  49. +7 -1  code/components/tflite-lib/tensorflow/lite/c/c_api_types.h
  50. +24 -10  code/components/tflite-lib/tensorflow/lite/c/common.cc
  51. +32 -3  code/components/tflite-lib/tensorflow/lite/c/common.h
  52. +33 -15  code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.cc
  53. +5 -0  code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.h
  54. +52 -1  code/components/tflite-lib/tensorflow/lite/core/api/op_resolver.h
  55. +4 -4  code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft.cc
  56. +2 -1  code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc
  57. +0 -1  code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h
  58. +1 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/common.h
  59. +10 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/compatibility.h
  60. +1 -1  code/components/tflite-lib/tensorflow/lite/kernels/internal/portable_tensor_utils.h
  61. +1 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/add.h
  62. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/concatenation.h
  63. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/conv.h
  64. +247 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/div.h
  65. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/fully_connected.h
  66. +5 -3  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/hard_swish.h
  67. +1 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/add.h
  68. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
  69. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h
  70. +90 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h
  71. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h
  72. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h
  73. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h
  74. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h
  75. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h
  76. +1 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h
  77. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h
  78. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/mul.h
  79. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/pooling.h
  80. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/prelu.h
  81. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h
  82. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/reduce.h
  83. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/requantize.h
  84. +1 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h
  85. +1 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/softmax.h
  86. +2 -0  code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/transpose_conv.h
  87. +6 -3  code/components/tflite-lib/tensorflow/lite/kernels/internal/runtime_shape.h
  88. +5 -5  code/components/tflite-lib/tensorflow/lite/kernels/internal/types.h
  89. +9 -1  code/components/tflite-lib/tensorflow/lite/kernels/kernel_util.h
  90. +2 -0  code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.cc
  91. +3 -3  code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h
  92. +170 -0  code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.cc
  93. +105 -0  code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h
  94. +52 -0  code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.cc
  95. +59 -0  code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h
  96. +21 -19  code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.cc
  97. +13 -14  code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h
  98. +44 -40  code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.cc
  99. +17 -15  code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h
  100. +6 -6  code/components/tflite-lib/tensorflow/lite/micro/fake_micro_context.cc

+ 106 - 0
.github/workflows/build.yaml

@@ -0,0 +1,106 @@
+name: Build and Pack
+
+on: [push]
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Cache PlatformIO
+      uses: actions/cache@v2
+      with:
+        path: ~/.platformio
+        key: ${{ runner.os }}-${{ hashFiles('**/lockfiles') }}
+
+    - name: Set up Python
+      uses: actions/setup-python@v2
+    - name: Install PlatformIO
+      run: |
+        python -m pip install --upgrade pip
+        pip install --upgrade platformio
+
+    - name: Set Variables
+      id: vars
+      run: |
+        echo "::set-output name=sha_short::$(git rev-parse --short HEAD)"
+        echo "::set-output name=date_time::$(git log -1 --format="%at" | xargs -I{} date -d @{} '+%Y-%m-%d %H:%M:%S')"
+        echo "::set-output name=date_time_filename::$(git log -1 --format="%at" | xargs -I{} date -d @{} '+%Y-%m-%d_%H-%M-%S')"
+        #echo "::set-output name=version_string::${{ steps.vars.outputs.date_time_filename }}__${{ github.ref_name }}_(${{ steps.vars.outputs.sha_short }})"
+        #echo "Version String: ${{ steps.vars.outputs.version_string }}"
+
+
+    - name: Set Version used in HTML Info page
+      run: echo "${{ steps.vars.outputs.date_time }}, ${{ github.ref_name }} (${{ steps.vars.outputs.sha_short }})" > "sd-card/html/version.txt"
+
+
+    - name: Build Firmware
+#      run: mkdir -p ./code/.pio/build/esp32cam/; touch ./code/.pio/build/esp32cam/firmware.bin # Testing
+      run: cd code; platformio run --environment esp32cam
+
+
+
+    # Old OTA concept
+    # firmware__*.zip needs to be unpacked before attaching to the release!
+    # The bin filename can contain versioning.
+    - name: Rename firmware file to contain versioning (old ota)
+      run: |
+        mkdir -p ./dist_old_ota
+        cp "./code/.pio/build/esp32cam/firmware.bin" "./dist_old_ota/firmware__${{ steps.vars.outputs.date_time_filename }}__${{ github.ref_name }}_(${{ steps.vars.outputs.sha_short }}).bin"
+        ls -l ./dist_old_ota
+
+    - name: Upload Firmware artifact (old OTA concept)
+      uses: actions/upload-artifact@v3
+      with:
+        name: "firmware__extract_before_upload__only_needed_for_migration_from_11.2.0"
+        path: ./dist_old_ota/*
+
+    - name: Upload Web interface artifact (old OTA concept)
+      uses: actions/upload-artifact@v3
+      with:
+        name: "html__only_needed_for_migration_from_11.2.0__${{ steps.vars.outputs.date_time_filename }}__${{ github.ref_name }}_(${{ steps.vars.outputs.sha_short }})"
+        path: ./sd-card/html/*
+
+
+
+    # New OTA concept
+    # update__version.zip file with following content:
+    #  - /firmware.bin
+    #  - (optional) /html/*
+    #  - (optional) /config/*.tfl        
+    - name: Prepare update.zip artifact
+      run: |
+        mkdir -p ./dist
+        cp "./code/.pio/build/esp32cam/firmware.bin" "dist/firmware.bin"
+
+    - name: Upload update.zip Artifact (Firmware only)
+      uses: actions/upload-artifact@v3
+      with:
+        name: "update_firmware_only__${{ steps.vars.outputs.date_time_filename }}__${{ github.ref_name }}_(${{ steps.vars.outputs.sha_short }})"
+        path: ./dist/*
+        
+
+    - name: Prepare update.zip artifact (Firmware + Web UI)
+      run: cp -r ./sd-card/html ./dist/
+
+    - name: Upload update.zip artifact (Firmware + Web UI)
+      uses: actions/upload-artifact@v3
+      with:
+        name: "update_firmware+web_ui__${{ steps.vars.outputs.date_time_filename }}__${{ github.ref_name }}_(${{ steps.vars.outputs.sha_short }})"
+        path: ./dist/*
+        
+
+    - name: Prepare update.zip artifact (Firmware + Web UI + CNN)
+      run: |
+        mkdir ./dist/config/
+        cp ./sd-card/config/*.tfl ./dist/config/ 2>/dev/null || true
+        cp ./sd-card/config/*.tflite ./dist/config/ 2>/dev/null || true
+
+    - name: Upload update.zip artifact (Firmware + Web UI + CNN)
+      uses: actions/upload-artifact@v3
+      with:
+        name: "update_firmware+web_ui+cnn__${{ steps.vars.outputs.date_time_filename }}__${{ github.ref_name }}_(${{ steps.vars.outputs.sha_short }})"
+        path: ./dist/*

+ 2 - 0
FeatureRequest.md

@@ -66,6 +66,8 @@ ____
 * Let the device be normally in deep sleep state, and wake it up periodically to collect data and push it via MQTT or HTTP post.
 * Support ESP-NOW to reduce the overhead of connecting to wifi and mqtt 
 * the above should enable battery powered applications
+
+* An other way to set deep sleep would be to enable it in a specific period (at night).
   
 
 #### #19 Extended log informations

+ 35 - 3
README.md

@@ -40,11 +40,43 @@ In other cases you can contact the developer via email: <img src="https://raw.gi
 
 ------
 
-##### 11.1.0 - Intermediate Digits
+##### 11.3.0 - Intermediate Digits (2022-09-17)
 
-- Updated postprocessing algorithm (fix from @haverland)
+- **ATTENTION**: 
+  - first update the 'firmware.bin' and ensure that the new version is running
 
-##### 11.0.1 - Intermediate Digits
+  - Only afterwards update the 'html.zip'
+  
+  - Otherwise the downwards compatibility of the new counter clockwise feature is not given and you end in a reboot loop, that needs manual flashing!
+  
+
+
+- Increased precision (more than 6-7 digits)
+- Implements Counter Clockwise Analog Pointers
+- Improved post processing algorithm
+- Debugging: intensive use of testcases
+- MQTT: improved handling, extended logging, automated reconnect
+- HTML: Backup Option for Configuration
+- HTML: Improved Reboot
+- HTML: Update WebUI (Reboot, Infos, CPU Temp, RSSI)
+- This version is largely also based on the work of **[caco3](https://github.com/caco3)**,  **[adellafave](https://github.com/adellafave)**,  **[haverland](https://github.com/haverland)**,  **[stefanbode](https://github.com/stefanbode)**, **[PLCHome](https://github.com/PLCHome)**
+
+##### 11.2.0 - Intermediate Digits (2022-08-28)
+
+- Updated Tensorflow / TFlite to newest tflite (version as of 2022-07-27)
+- Updated analog neural network file (`ana-cont_11.3.0_s2.tflite` - default, `ana-class100_0120_s1_q.tflite`)
+- Updated digital neural network file (`dig-cont_0570_s3.tflite` - default, `dig-class100_0120_s2_q.tflite`)
+
+- Added automated filtering of tflite-file in the graphical configuration (thanks to @**[caco3](https://github.com/caco3)**)
+- Updated consistency algorithm & test cases
+- HTML: added favicon and system name, Improved reboot dialog  (thanks to @**[caco3](https://github.com/caco3)**)
+
+##### 11.1.1 - Intermediate Digits (2022-08-22)
+
+- New and improved consistency check (especially with analog and digital counters mixed)
+- Bug Fix: digital counter algorithm
+
+##### 11.0.1 - Intermediate Digits (2022-08-18)
 
 - **NEW v11.0.1**: Bug Fix InfluxDB configuration (only update of html.zip necessary)
 

+ 3 - 1
code/components/esp-nn/CMakeLists.txt

@@ -5,7 +5,9 @@ set(c_srcs
     "src/basic_math/esp_nn_add_ansi.c"
     "src/basic_math/esp_nn_mul_ansi.c"
     "src/convolution/esp_nn_conv_ansi.c"
+    "src/convolution/esp_nn_conv_opt.c"
     "src/convolution/esp_nn_depthwise_conv_ansi.c"
+    "src/convolution/esp_nn_depthwise_conv_opt.c"
     "src/fully_connected/esp_nn_fully_connected_ansi.c"
     "src/softmax/esp_nn_softmax_ansi.c"
     "src/softmax/esp_nn_softmax_opt.c"
@@ -23,7 +25,7 @@ if(CONFIG_IDF_TARGET_ESP32S3)
         "src/convolution/esp_nn_conv_esp32s3.c"
         "src/convolution/esp_nn_depthwise_conv_s8_esp32s3.c"
         "src/convolution/esp_nn_conv_s16_mult8_esp32s3.S"
-        "src/convolution/esp_nn_conv_s16_mult8_1x1_esp32s3.S"
+        "src/convolution/esp_nn_conv_s8_mult8_1x1_esp32s3.S"
         "src/convolution/esp_nn_conv_s16_mult4_1x1_esp32s3.S"
         "src/convolution/esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3.S"
         "src/convolution/esp_nn_depthwise_conv_s16_mult1_esp32s3.S"

+ 4 - 4
code/components/esp-nn/Kconfig.projbuild

@@ -6,8 +6,8 @@ choice NN_OPTIMIZATIONS
    help
       Use ANSI-C versions for verification and debug purpose.
       Optimisations are automatically picked up for a chipset.
-      For ESP32-S3, assembly Optimisations are selected.
-      For ESP32, just the ANSI C versions are selected for now.
+      For ESP32-S3, assembly optimisations are selected.
+      For other platforms(viz., ESP32, ESP32-C3), generic optimisations are used.
 
 config NN_ANSI_C
    bool "ANSI C"
@@ -17,8 +17,8 @@ config NN_OPTIMIZED
    bool "Optimized versions"
    help
       Optimisations are automatically picked up for a chipset.
-      For ESP32-S3, assembly Optimisations are selected.
-      For ESP32, just the ANSI C versions are selected for now.
+      For ESP32-S3, assembly optimisations are selected.
+      For other platforms(viz., ESP32, ESP32-C3), generic optimisations are used.
 endchoice
 
 config NN_OPTIMIZATIONS

+ 4 - 3
code/components/esp-nn/README.md

@@ -7,7 +7,8 @@ The library contains optimised NN (Neural Network) functions for various Espress
 
 * Supported ESP chipsets include:
    * ESP32-S3 (Assembly versions optimised to benefit from vector instructions of ESP32-S3)
-   * ESP32 (ANSI C versions)
+   * ESP32 (Generic optimisations)
+   * ESP32-C3 (Generic optimisations)
 
 ## Performance
 
@@ -39,8 +40,8 @@ The library contains optimised NN (Neural Network) functions for various Espress
      * Optimized versions
      * ANSI C
 
-  * Default selection is for `Optimized versions`. For ESP32-S3, assembly versions are automatically selected, whereas for ESP32,  ANSI-C versions are selected by default.
-  * For debugging purposes, you may want to select `ANSI C`
+  * Default selection is for `Optimized versions`. For ESP32-S3, assembly versions are automatically selected, whereas for other chipsets (viz., ESP32, ESP32-C3), generic optimisations are selected.
+  * For debugging purposes, you may want to select `ANSI C` reference versions.
 
 
 ## Contributing

+ 5 - 5
code/components/esp-nn/include/esp_nn.h

@@ -15,6 +15,7 @@
 #pragma once
 
 #if defined(CONFIG_NN_OPTIMIZED)
+// select apt optimisations
 #ifdef CONFIG_IDF_TARGET_ESP32S3
 #define ARCH_ESP32_S3 1
 #endif
@@ -31,12 +32,11 @@ extern "C" {
 #include "esp_nn_ansi_headers.h"
 
 #if defined(CONFIG_NN_OPTIMIZED)
-#ifdef ARCH_ESP32_S3
+#if defined(ARCH_ESP32_S3)
 #include "esp_nn_esp32s3.h"
-#endif
-#ifdef ARCH_ESP32
-#include "esp_nn_esp32.h"
-#endif
+#else // for other platforms use generic optimisations
+#include "esp_nn_generic_opt.h"
+#endif // #if defined(ARCH_ESP32_S3)
 #else
 #include "esp_nn_ansi_c.h"
 #endif

+ 1 - 0
code/components/esp-nn/include/esp_nn_ansi_c.h

@@ -19,6 +19,7 @@
 
 #pragma once
 
+#include "esp_nn_defs.h"
 #include "esp_nn_ansi_headers.h"
 
 #define esp_nn_add_elementwise_s8 esp_nn_add_elementwise_s8_ansi

+ 82 - 56
code/components/esp-nn/include/esp_nn_ansi_headers.h

@@ -18,8 +18,7 @@
  * @file        Header definitions to include for esp_nn reference functions
  */
 
-#include <stdint.h>
-
+#include "esp_nn_defs.h"
 /************************** Basic math functions ****************************/
 
 /**
@@ -81,28 +80,15 @@ void esp_nn_mul_elementwise_s8_ansi(const int8_t *input1_data,
  *              optimization notes: Though input_offset is int32 type,
  *              offset values are contained in 8 bits [-128, 127]
  */
-void esp_nn_depthwise_conv_s8_ansi(const int8_t *input_data,
-                                   const uint16_t input_wd,
-                                   const uint16_t input_ht,
-                                   const uint16_t channels,
-                                   const int32_t input_offset,
-                                   const uint16_t pad_wd,
-                                   const uint16_t pad_ht,
-                                   const uint16_t stride_wd,
-                                   const uint16_t stride_ht,
-                                   const uint16_t ch_mult,
+void esp_nn_depthwise_conv_s8_ansi(const data_dims_t *input_dims,
+                                   const int8_t *input_data,
+                                   const data_dims_t *filter_dims,
                                    const int8_t *filter_data,
-                                   const uint16_t filter_wd,
-                                   const uint16_t filter_ht,
                                    const int32_t *bias,
+                                   const data_dims_t *output_dims,
                                    int8_t *out_data,
-                                   const uint16_t out_wd,
-                                   const uint16_t out_ht,
-                                   const int32_t out_offset,
-                                   const int32_t *out_shift,
-                                   const int32_t *out_mult,
-                                   const int32_t activation_min,
-                                   const int32_t activation_max);
+                                   const dw_conv_params_t *conv_params,
+                                   const quant_data_t *quant_data);
 
 /**
  * @brief       2d-convolution channelwise
@@ -112,43 +98,26 @@ void esp_nn_depthwise_conv_s8_ansi(const int8_t *input_data,
  *              inputs type: int8_t, output: int8_t
  *              input offsets: although int32_t, they are contained in 8 bits [-128, 127]
  */
-void esp_nn_conv_s8_ansi(const int8_t *input_data,
-                         const uint16_t input_wd,
-                         const uint16_t input_ht,
-                         const uint16_t in_channels,
-                         const int32_t input_offset,
-                         const uint16_t pad_wd,
-                         const uint16_t pad_ht,
-                         const uint16_t stride_wd,
-                         const uint16_t stride_ht,
+void esp_nn_conv_s8_ansi(const data_dims_t *input_dims,
+                         const int8_t *input_data,
+                         const data_dims_t *filter_dims,
                          const int8_t *filter_data,
-                         const uint16_t filter_wd,
-                         const uint16_t filter_ht,
                          const int32_t *bias,
+                         const data_dims_t *output_dims,
                          int8_t *out_data,
-                         const uint16_t out_wd,
-                         const uint16_t out_ht,
-                         const uint16_t out_channels,
-                         const int32_t out_offset,
-                         const int32_t *out_shift,
-                         const int32_t *out_mult,
-                         const int32_t activation_min,
-                         const int32_t activation_max);
-
-int esp_nn_get_conv_scratch_size_ansi(const uint16_t input_wd,
-                                      const uint16_t input_ht,
-                                      const uint16_t in_ch,
-                                      const uint16_t out_ch,
-                                      const uint16_t filter_wd,
-                                      const uint16_t filter_ht);
+                         const conv_params_t *conv_params,
+                         const quant_data_t *quant_data);
+
+int esp_nn_get_conv_scratch_size_ansi(const data_dims_t *input_dims,
+                                      const data_dims_t *filter_dims,
+                                      const data_dims_t *output_dims,
+                                      const conv_params_t *conv_params);
 void esp_nn_set_conv_scratch_buf_ansi(const void *buf);
 
-int esp_nn_get_depthwise_conv_scratch_size_ansi(const uint16_t input_wd,
-                                                const uint16_t input_ht,
-                                                const uint16_t channels,
-                                                const uint16_t ch_mult,
-                                                const uint16_t filter_wd,
-                                                const uint16_t filter_ht);
+int esp_nn_get_depthwise_conv_scratch_size_ansi(const data_dims_t *input_dims,
+                                                const data_dims_t *filter_dims,
+                                                const data_dims_t *output_dims,
+                                                const dw_conv_params_t *conv_params);
 void esp_nn_set_depthwise_conv_scratch_buf_ansi(const void *buf);
 
 /************************** Activation functions *****************************/
@@ -252,9 +221,6 @@ int32_t esp_nn_get_softmax_scratch_size_opt(const int32_t width, const int32_t h
  */
 void esp_nn_set_softmax_scratch_buf_ansi(void *buffer);
 
-/* ANSI C function to be hooked up when optimised version needed */
-void esp_nn_set_softmax_scratch_buf_opt(void *buffer);
-
 /**
  * @brief       reference softmax function
  *
@@ -268,6 +234,66 @@ void esp_nn_softmax_s8_ansi(const int8_t *input_data,
                             const int32_t diff_min,
                             int8_t *output_data);
 
+
+//////////////////////////// Generic optimisations /////////////////////////////
+
+/************************** Convolution functions *****************************/
+
+/**
+ * @brief       2d-convolution channelwise optimized version
+ *
+ * @note        operation: result += (input + offset) * filter
+ *
+ *              inputs type: int8_t, output: int8_t
+ *              input offsets: although int32_t, they are contained in 8 bits [-128, 127]
+ */
+void esp_nn_conv_s8_opt(const data_dims_t *input_dims,
+                        const int8_t *input_data,
+                        const data_dims_t *filter_dims,
+                        const int8_t *filter_data,
+                        const int32_t *bias,
+                        const data_dims_t *output_dims,
+                        int8_t *out_data,
+                        const conv_params_t *conv_params,
+                        const quant_data_t *quant_data);
+
+/**
+ * @brief       depthwise convolution per channel optimized version
+ *
+ * @note        inputs type: int8_t, output: int8_t
+ *              Version used in tflite is per channel.
+ *              This version follows the same footsprints.
+ *              Meaning, it has per out_channel shift and multiplier for
+ *              requantization
+ *
+ *              optimization notes: Though input_offset is int32 type,
+ *              offset values are contained in 8 bits [-128, 127]
+ */
+void esp_nn_depthwise_conv_s8_opt(const data_dims_t *input_dims,
+                                  const int8_t *input_data,
+                                  const data_dims_t *filter_dims,
+                                  const int8_t *filter_data,
+                                  const int32_t *bias,
+                                  const data_dims_t *output_dims,
+                                  int8_t *out_data,
+                                  const dw_conv_params_t *conv_params,
+                                  const quant_data_t *quant_data);
+
+int esp_nn_get_conv_scratch_size_opt(const data_dims_t *input_dims,
+                                     const data_dims_t *filter_dims,
+                                     const data_dims_t *output_dims,
+                                     const conv_params_t *conv_params);
+void esp_nn_set_conv_scratch_buf_opt(const void *buf);
+
+int esp_nn_get_depthwise_conv_scratch_size_opt(const data_dims_t *input_dims,
+                                               const data_dims_t *filter_dims,
+                                               const data_dims_t *output_dims,
+                                               const dw_conv_params_t *conv_params);
+void esp_nn_set_depthwise_conv_scratch_buf_opt(const void *buf);
+
+/* ANSI C function to be hooked up when optimised version needed */
+void esp_nn_set_softmax_scratch_buf_opt(void *buffer);
+
 /**
  * @brief       optimised version of softmax function
  *

+ 83 - 0
code/components/esp-nn/include/esp_nn_defs.h

@@ -0,0 +1,83 @@
+// Copyright 2022 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <stdint.h>
+
+/**
+ * @brief structure to club data dims
+ * this structure can be used for input, output and filter
+ */
+typedef struct data_dims {
+    int32_t width;
+    int32_t height;
+    int32_t channels;
+
+    int32_t extra; // can be used as batch or any other param
+} data_dims_t;
+
+/**
+ * @brief 2d data structure (width, height)
+ *
+ */
+typedef struct data_2d {
+    int32_t width;
+    int32_t height;
+} data_2d_t;
+
+/**
+ * @brief min/max activation
+ */
+typedef struct act_params {
+    int32_t min;
+    int32_t max;
+} act_params_t;
+
+/**
+ * @brief per channel quant data
+ *
+ * @note number of shift and mult elements are equal to output channels
+ */
+typedef struct quant_data {
+    int32_t *shift;
+    int32_t *mult;
+} quant_data_t;
+
+/**
+ * @brief params specific to convolution 2d
+ *
+ */
+typedef struct conv_params {
+    int32_t in_offset;
+    int32_t out_offset;
+    data_2d_t stride;
+    data_2d_t padding;
+    data_2d_t dilation;
+    act_params_t activation;
+} conv_params_t;
+
+/**
+ * @brief params specific to depthwise convolution 2d
+ *
+ */
+typedef struct dw_conv_params {
+    int32_t in_offset;
+    int32_t out_offset;
+    int32_t ch_mult; // channel multiplier. (in_ch * ch_mult = out_ch)
+    data_2d_t stride;
+    data_2d_t padding;
+    data_2d_t dilation;
+    act_params_t activation;
+} dw_conv_params_t;
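
These structs replace the long flat argument lists used by the previous esp-nn API. As a rough caller-side sketch (tensor shapes and quantisation offsets are invented for illustration; esp_nn.h is assumed to map esp_nn_conv_s8 onto the variant selected via the NN_OPTIMIZATIONS choice), the new struct-based convolution entry point could be driven like this:

    #include "esp_nn.h"  /* resolves esp_nn_conv_s8 to the _ansi/_opt/_esp32s3 variant */

    /* Hypothetical shapes: 8x8x8 input, 3x3 filter, 16 output channels, stride 1, pad 1 */
    static int8_t  input[8 * 8 * 8];
    static int8_t  filter[3 * 3 * 8 * 16];
    static int32_t bias[16];
    static int32_t out_mult[16], out_shift[16];  /* per-channel requant data from the quantised model */
    static int8_t  output[8 * 8 * 16];

    void run_conv_example(void)
    {
        const data_dims_t input_dims  = { .width = 8, .height = 8, .channels = 8,  .extra = 1 };
        const data_dims_t filter_dims = { .width = 3, .height = 3, .channels = 8,  .extra = 0 };
        const data_dims_t output_dims = { .width = 8, .height = 8, .channels = 16, .extra = 1 };

        const conv_params_t conv_params = {
            .in_offset  = 128,       /* example offsets, contained in 8 bits */
            .out_offset = -128,
            .stride     = { .width = 1, .height = 1 },
            .padding    = { .width = 1, .height = 1 },
            .dilation   = { .width = 1, .height = 1 },
            .activation = { .min = -128, .max = 127 },
        };
        const quant_data_t quant_data = { .shift = out_shift, .mult = out_mult };

        /* Optimised variants may additionally need a scratch buffer,
           see the scratch-buffer sketch further below. */
        esp_nn_conv_s8(&input_dims, input, &filter_dims, filter, bias,
                       &output_dims, output, &conv_params, &quant_data);
    }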

+ 24 - 54
code/components/esp-nn/include/esp_nn_esp32s3.h

@@ -19,7 +19,7 @@
 
 #pragma once
 
-#include <stdint.h>
+#include "esp_nn_defs.h"
 #include "esp_nn_ansi_headers.h"
 
 /************************** Basic math functions *****************************/
@@ -85,28 +85,15 @@ void esp_nn_mul_elementwise_s8_esp32s3(const int8_t *input1_data,
  *              optimization notes: Though input_offset is int32 type,
  *              offset values are contained in 8 bits [-128, 127]
  */
-void esp_nn_depthwise_conv_s8_esp32s3(const int8_t *input_data,
-                                      const uint16_t input_wd,
-                                      const uint16_t input_ht,
-                                      const uint16_t channels,
-                                      const int32_t input_offset,
-                                      const uint16_t pad_wd,
-                                      const uint16_t pad_ht,
-                                      const uint16_t stride_wd,
-                                      const uint16_t stride_ht,
-                                      const uint16_t ch_mult,
+void esp_nn_depthwise_conv_s8_esp32s3(const data_dims_t *input_dims,
+                                      const int8_t *input_data,
+                                      const data_dims_t *filter_dims,
                                       const int8_t *filter_data,
-                                      const uint16_t filter_wd,
-                                      const uint16_t filter_ht,
                                       const int32_t *bias,
-                                      int8_t *out_data,
-                                      const uint16_t out_wd,
-                                      const uint16_t out_ht,
-                                      const int32_t out_offset,
-                                      const int32_t *out_shift,
-                                      const int32_t *out_mult,
-                                      const int32_t activation_min,
-                                      const int32_t activation_max);
+                                      const data_dims_t *output_dims,
+                                      int8_t *output_data,
+                                      const dw_conv_params_t *conv_params,
+                                      const quant_data_t *quant_data);
 
 /**
  * @brief       2d - convolution channelwise
@@ -116,43 +103,26 @@ void esp_nn_depthwise_conv_s8_esp32s3(const int8_t *input_data,
  *              inputs type: int8_t, output: int8_t
  *              input offsets: although int32_t, they are contained in 8 bits [-128, 127]
  */
-void esp_nn_conv_s8_esp32s3(const int8_t *input_data,
-                            const uint16_t input_wd,
-                            const uint16_t input_ht,
-                            const uint16_t in_channels,
-                            const int32_t input_offset,
-                            const uint16_t pad_wd,
-                            const uint16_t pad_ht,
-                            const uint16_t stride_wd,
-                            const uint16_t stride_ht,
+void esp_nn_conv_s8_esp32s3(const data_dims_t *input_dims,
+                            const int8_t *input_data,
+                            const data_dims_t *filter_dims,
                             const int8_t *filter_data,
-                            const uint16_t filter_wd,
-                            const uint16_t filter_ht,
                             const int32_t *bias,
-                            int8_t *out_data,
-                            const uint16_t out_wd,
-                            const uint16_t out_ht,
-                            const uint16_t out_channels,
-                            const int32_t out_offset,
-                            const int32_t *out_shift,
-                            const int32_t *out_mult,
-                            const int32_t activation_min,
-                            const int32_t activation_max);
-
-int esp_nn_get_conv_scratch_size_esp32s3(const uint16_t input_wd,
-                                         const uint16_t input_ht,
-                                         const uint16_t in_ch,
-                                         const uint16_t out_ch,
-                                         const uint16_t filter_wd,
-                                         const uint16_t filter_ht);
+                            const data_dims_t *output_dims,
+                            int8_t *output_data,
+                            const conv_params_t *conv_params,
+                            const quant_data_t *quant_data);
+
+int esp_nn_get_conv_scratch_size_esp32s3(const data_dims_t *input_dims,
+                                         const data_dims_t *filter_dims,
+                                         const data_dims_t *output_dims,
+                                         const conv_params_t *conv_params);
 void esp_nn_set_conv_scratch_buf_esp32s3(const void *buf);
 
-int esp_nn_get_depthwise_conv_scratch_size_esp32s3(const uint16_t input_wd,
-                                                   const uint16_t input_ht,
-                                                   const uint16_t channels,
-                                                   const uint16_t ch_mult,
-                                                   const uint16_t filter_wd,
-                                                   const uint16_t filter_ht);
+int esp_nn_get_depthwise_conv_scratch_size_esp32s3(const data_dims_t *input_dims,
+                                                   const data_dims_t *filter_dims,
+                                                   const data_dims_t *output_dims,
+                                                   const dw_conv_params_t *conv_params);
 void esp_nn_set_depthwise_conv_scratch_buf_esp32s3(const void *buf);
 
 /************************** Pooling functions *****************************/

+ 9 - 10
code/components/esp-nn/include/esp_nn_esp32.h → code/components/esp-nn/include/esp_nn_generic_opt.h

@@ -13,28 +13,27 @@
 // limitations under the License.
 
 /**
- * @file        Header definitions to include for esp_nn optimized functions for
- *              the ESP32 platform.
- *              We are hooking up just the C versions for now.
- *              The file hence is exactly same as `esp_nn_ansi_c.h`
+ * @file        Header definitions to include for esp_nn generic optimisations
+ *              For functions which not having optimisations, _ansi versions are picked.
  */
 
 #pragma once
 
+#include "esp_nn_defs.h"
 #include "esp_nn_ansi_headers.h"
 
 #define esp_nn_add_elementwise_s8 esp_nn_add_elementwise_s8_ansi
 #define esp_nn_mul_elementwise_s8 esp_nn_mul_elementwise_s8_ansi
 
-#define esp_nn_depthwise_conv_s8 esp_nn_depthwise_conv_s8_ansi
+#define esp_nn_depthwise_conv_s8 esp_nn_depthwise_conv_s8_opt
 
-#define esp_nn_conv_s8 esp_nn_conv_s8_ansi
+#define esp_nn_conv_s8 esp_nn_conv_s8_opt
 
-#define esp_nn_get_conv_scratch_size esp_nn_get_conv_scratch_size_ansi
-#define esp_nn_set_conv_scratch_buf esp_nn_set_conv_scratch_buf_ansi
+#define esp_nn_get_conv_scratch_size esp_nn_get_conv_scratch_size_opt
+#define esp_nn_set_conv_scratch_buf esp_nn_set_conv_scratch_buf_opt
 
-#define esp_nn_get_depthwise_conv_scratch_size esp_nn_get_depthwise_conv_scratch_size_ansi
-#define esp_nn_set_depthwise_conv_scratch_buf esp_nn_set_depthwise_conv_scratch_buf_ansi
+#define esp_nn_get_depthwise_conv_scratch_size esp_nn_get_depthwise_conv_scratch_size_opt
+#define esp_nn_set_depthwise_conv_scratch_buf esp_nn_set_depthwise_conv_scratch_buf_opt
 
 #define esp_nn_relu6_s8 esp_nn_relu6_s8_ansi
 

+ 50 - 13
code/components/esp-nn/src/common/common_functions.h

@@ -41,15 +41,39 @@
 
 __NN_FORCE_INLINE__ int32_t esp_nn_clz32(uint32_t in)
 {
+#if CONFIG_IDF_TARGET_ARCH_XTENSA
     __asm__ volatile("nsau %0, %0" : "+r" (in));
     return in;
-}
-
-__NN_FORCE_INLINE__ int32_t esp_nn_pick_sat_high32_of64(int64_t val64)
-{
-    int32_t sign = (int32_t) (val64 >> 63);
-    int32_t to_add = sign & ((1ul << 31) - 1);
-    return (int32_t) ((int64_t) (val64 + to_add) >> 31);
+#elif defined(__GNUC__)
+    return __builtin_clz(in);
+#else
+    int32_t count = 32;
+    uint32_t x = in, y = in >> 16;
+    if (y != 0) {
+        count -= 16;
+        x = y;
+    }
+    y = x >> 8;
+    if (y != 0) {
+        count -= 8;
+        x = y;
+    }
+    y = x >> 4;
+    if (y != 0) {
+        count -= 4;
+        x = y;
+    }
+    y = x >> 2;
+    if (y != 0) {
+        count -= 2;
+        x = y;
+    }
+    y = x >> 1;
+    if (y != 0) {
+        return count - 2;
+    }
+    return count - x;
+#endif
 }
 
 /**
@@ -57,8 +81,19 @@ __NN_FORCE_INLINE__ int32_t esp_nn_pick_sat_high32_of64(int64_t val64)
  */
 __NN_FORCE_INLINE__ int32_t esp_nn_saturate8(int32_t in)
 {
+#if CONFIG_IDF_TARGET_ARCH_XTENSA
     __asm__ volatile("clamps %0, %0, 7" : "+a"(in));
     return in;
+#else
+    return max(INT8_MIN, min(in, INT8_MAX));
+#endif
+}
+
+__NN_FORCE_INLINE__ int32_t esp_nn_pick_sat_high32_of64(int64_t val64)
+{
+    int32_t sign = (int32_t) (val64 >> 63);
+    int32_t to_add = sign & ((1ul << 31) - 1);
+    return (int32_t) ((int64_t) (val64 + to_add) >> 31);
 }
 
 __NN_FORCE_INLINE__ int32_t esp_nn_sat_round_doubling_high_mul(int32_t in0, int32_t in1)
@@ -144,7 +179,7 @@ static void esp_nn_aligned_s8_pad_with_value(const int8_t *src, int8_t *dst,
                                              const uint16_t pad_ht)
 {
     /* memset with pad_val */
-    memset(dst, pad_val, ((input_wd + 2 * pad_wd) * (input_ht + 2 * pad_ht)) * channels * 2);
+    memset(dst, pad_val, ((input_wd + 2 * pad_wd) * (input_ht + 2 * pad_ht)) * channels);
     dst += (pad_wd + input_wd + pad_wd) * channels;
 
     for (int i = 0; i < input_ht; i++) {
@@ -156,7 +191,6 @@ static void esp_nn_aligned_s8_pad_with_value(const int8_t *src, int8_t *dst,
     }
 }
 
-#if 0
 static void esp_nn_aligned_s8_pad_end_with_value(const int8_t *src, int8_t *dst,
                                                  const uint16_t input_wd,
                                                  const uint16_t input_ht,
@@ -169,13 +203,16 @@ static void esp_nn_aligned_s8_pad_end_with_value(const int8_t *src, int8_t *dst,
         for (int j = 0; j < input_wd * channels; j++) {
             *dst++ = *src++;
         }
-        memset(dst, pad_val, pad_wd * channels);
-        dst += pad_wd * channels;
+        if (pad_wd) {
+            memset(dst, pad_val, pad_wd * channels);
+            dst += pad_wd * channels;
+        }
     }
     /* pad end `pad_ht` lines at end */
-    memset(dst, pad_val, (input_wd + pad_wd) * pad_ht * channels);
+    if (pad_ht) {
+        memset(dst, pad_val, (input_wd + pad_wd) * pad_ht * channels);
+    }
 }
-#endif
 
 /**
  * @brief       convert 8 bit input data to 16 bit
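
The non-Xtensa fall-backs introduced here are easy to sanity-check on a host machine. The following standalone harness (not part of the repository) mirrors the shift-based CLZ fallback above and compares it against __builtin_clz, which is what the GCC/Clang branch uses:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the generic (non-Xtensa, non-GCC) CLZ fallback shown above */
    static int32_t clz32_fallback(uint32_t in)
    {
        int32_t count = 32;
        uint32_t x = in, y = in >> 16;
        if (y != 0) { count -= 16; x = y; }
        y = x >> 8;  if (y != 0) { count -= 8; x = y; }
        y = x >> 4;  if (y != 0) { count -= 4; x = y; }
        y = x >> 2;  if (y != 0) { count -= 2; x = y; }
        y = x >> 1;  if (y != 0) { return count - 2; }
        return count - x;
    }

    int main(void)
    {
        /* walk all bit positions; __builtin_clz(0) is undefined, so 0 is skipped */
        for (uint32_t bit = 1; bit != 0; bit <<= 1) {
            assert(clz32_fallback(bit) == (int32_t) __builtin_clz(bit));
            assert(clz32_fallback(bit | 1) == (int32_t) __builtin_clz(bit | 1));
        }
        printf("generic CLZ fallback matches __builtin_clz\n");
        return 0;
    }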

+ 30 - 26
code/components/esp-nn/src/convolution/esp_nn_conv_ansi.c

@@ -12,16 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <stdint.h>
+#include <esp_nn_defs.h>
 
 #include <common_functions.h>
 
-int esp_nn_get_conv_scratch_size_ansi(const uint16_t input_wd,
-                                      const uint16_t input_ht,
-                                      const uint16_t in_ch,
-                                      const uint16_t out_ch,
-                                      const uint16_t filter_wd,
-                                      const uint16_t filter_ht)
+int esp_nn_get_conv_scratch_size_ansi(const data_dims_t *input_dims,
+                                      const data_dims_t *filter_dims,
+                                      const data_dims_t *output_dims,
+                                      const conv_params_t *conv_params)
 {
     return 0;
 }
@@ -108,29 +106,35 @@ void esp_nn_conv_u8_ansi(const uint8_t *input_data,
  * Assumption 2: Pointers are valid
  * Assumption 3: dialation width = 1
  */
-void esp_nn_conv_s8_ansi(const int8_t *input_data,
-                         const uint16_t input_wd,
-                         const uint16_t input_ht,
-                         const uint16_t in_channels,
-                         const int32_t input_offset,
-                         const uint16_t pad_wd,
-                         const uint16_t pad_ht,
-                         const uint16_t stride_wd,
-                         const uint16_t stride_ht,
+void esp_nn_conv_s8_ansi(const data_dims_t *input_dims,
+                         const int8_t *input_data,
+                         const data_dims_t *filter_dims,
                          const int8_t *filter_data,
-                         const uint16_t filter_wd,
-                         const uint16_t filter_ht,
                          const int32_t *bias,
+                         const data_dims_t *output_dims,
                          int8_t *out_data,
-                         const uint16_t out_wd,
-                         const uint16_t out_ht,
-                         const uint16_t out_channels,
-                         const int32_t out_offset,
-                         const int32_t *out_shift,
-                         const int32_t *out_mult,
-                         const int32_t activation_min,
-                         const int32_t activation_max)
+                         const conv_params_t *conv_params,
+                         const quant_data_t *quant_data)
 {
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t in_channels = input_dims->channels;
+    const int32_t input_offset = conv_params->in_offset;
+    const int32_t out_offset = conv_params->out_offset;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const uint16_t out_channels = output_dims->channels;
+    const int32_t *out_shift = quant_data->shift;
+    const int32_t *out_mult = quant_data->mult;
+    const int32_t activation_min = conv_params->activation.min;
+    const int32_t activation_max = conv_params->activation.max;
+
     int32_t out_ch_idx, out_y, out_x, in_ch_idx, filter_y_idx, filter_x_idx;
 
     for (out_y = 0; out_y < out_ht; out_y++) {

+ 106 - 79
code/components/esp-nn/src/convolution/esp_nn_conv_esp32s3.c

@@ -12,30 +12,30 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <stdint.h>
 #include <stdio.h>
+#include <esp_nn_defs.h>
 
 #include <common_functions.h>
 
 static int16_t *scratch_buffer = NULL;
 
-extern void esp_nn_conv_s16_mult8_1x1_esp32s3(const int8_t *input_data,
-                                              const uint16_t input_wd,
-                                              const uint16_t input_ht,
-                                              const uint16_t in_channels,
-                                              const int32_t input_offset,
-                                              const int16_t *filter_data,
-                                              const int32_t *bias,
-                                              int8_t *out_data,
-                                              const uint16_t out_wd,
-                                              const uint16_t out_ht,
-                                              const uint16_t out_channels,
-                                              const int32_t out_offset,
-                                              const int32_t *out_shift,
-                                              const int32_t *out_mult,
-                                              const int32_t activation_min,
-                                              const int32_t activation_max,
-                                              void *buffer /* scratch buffer */);
+extern void esp_nn_conv_s8_mult8_1x1_esp32s3(const int8_t *input_data,
+                                             const uint16_t input_wd,
+                                             const uint16_t input_ht,
+                                             const uint16_t in_channels,
+                                             const int32_t input_offset,
+                                             const int8_t *filter_aligned,
+                                             const int32_t *bias,
+                                             int8_t *out_data,
+                                             const uint16_t out_wd,
+                                             const uint16_t out_ht,
+                                             const uint16_t out_channels,
+                                             const int32_t out_offset,
+                                             const int32_t *out_shift,
+                                             const int32_t *out_mult,
+                                             const int32_t activation_min,
+                                             const int32_t activation_max,
+                                             void *buffer /* scratch buffer */);
 
 extern void esp_nn_conv_s16_mult4_1x1_esp32s3(const int16_t *input_data,
                                               const uint16_t input_wd,
@@ -81,34 +81,40 @@ extern void esp_nn_aligned_s8_to_s16_with_offset_esp32s3(const int8_t *src, int1
 
 extern void esp_nn_s8_to_s16_esp32s3(const int8_t *src, int16_t *dst, const int size);
 
-static void esp_nn_conv_s8_unrolled(const int8_t *input_data,
-                                    const uint16_t input_wd,
-                                    const uint16_t input_ht,
-                                    const uint16_t in_channels,
-                                    const int32_t input_offset,
-                                    const uint16_t pad_wd,
-                                    const uint16_t pad_ht,
-                                    const uint16_t stride_wd,
-                                    const uint16_t stride_ht,
+static void esp_nn_conv_s8_unrolled(const data_dims_t *input_dims,
+                                    const int8_t *input_data,
+                                    const data_dims_t *filter_dims,
                                     const int8_t *filter_data,
-                                    const uint16_t filter_wd,
-                                    const uint16_t filter_ht,
                                     const int32_t *bias,
+                                    const data_dims_t *output_dims,
                                     int8_t *out_data,
-                                    const uint16_t out_wd,
-                                    const uint16_t out_ht,
-                                    const uint16_t out_channels,
-                                    const int32_t out_offset,
-                                    const int32_t *out_shift,
-                                    const int32_t *out_mult,
-                                    const int32_t activation_min,
-                                    const int32_t activation_max)
+                                    const conv_params_t *conv_params,
+                                    const quant_data_t *quant_data)
 {
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t in_ch = input_dims->channels;
+    const int32_t input_offset = conv_params->in_offset;
+    const int32_t out_offset = conv_params->out_offset;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const uint16_t out_ch = output_dims->channels;
+    const int32_t *out_shift = quant_data->shift;
+    const int32_t *out_mult = quant_data->mult;
+    const int32_t activation_min = conv_params->activation.min;
+    const int32_t activation_max = conv_params->activation.max;
+
     int32_t out_ch_idx, out_y, out_x, in_ch_idx, filter_y_idx, filter_x_idx;
 
     for (out_y = 0; out_y < out_ht; out_y++) {
         for (out_x = 0; out_x < out_wd; out_x++) {
-            for (out_ch_idx = 0; out_ch_idx < out_channels; out_ch_idx++) {
+            for (out_ch_idx = 0; out_ch_idx < out_ch; out_ch_idx++) {
                 int32_t conv_out = 0;
 
                 const int32_t base_y = stride_ht * out_y - pad_ht;
@@ -124,10 +130,10 @@ static void esp_nn_conv_s8_unrolled(const int8_t *input_data,
                     for (filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) {
                         const int32_t in_row = base_y + filter_y_idx;
                         const int32_t in_col = base_x + filter_x_idx;
-                        int32_t input_base_offset = (in_row * input_wd + in_col) * in_channels;
-                        int32_t filter_base_offset = out_ch_idx * in_channels * filter_ht * filter_wd +
-                                                       (filter_y_idx * filter_wd + filter_x_idx) * in_channels;
-                        for (in_ch_idx = 0; in_ch_idx < in_channels; in_ch_idx++) {
+                        int32_t input_base_offset = (in_row * input_wd + in_col) * in_ch;
+                        int32_t filter_base_offset = out_ch_idx * in_ch * filter_ht * filter_wd +
+                                                       (filter_y_idx * filter_wd + filter_x_idx) * in_ch;
+                        for (in_ch_idx = 0; in_ch_idx < in_ch; in_ch_idx++) {
                             conv_out +=
                                 (input_data[input_base_offset + in_ch_idx] + input_offset) *
                                 filter_data[filter_base_offset + in_ch_idx];
@@ -332,18 +338,35 @@ static void esp_nn_conv_s8_pad_valid_ch3_3x3(const int8_t *input_data,
     }
 }
 
-int esp_nn_get_conv_scratch_size_esp32s3(const uint16_t input_wd,
-                                         const uint16_t input_ht,
-                                         const uint16_t in_ch,
-                                         const uint16_t out_ch,
-                                         const uint16_t filter_wd,
-                                         const uint16_t filter_ht)
+int esp_nn_get_conv_scratch_size_esp32s3(const data_dims_t *input_dims,
+                                         const data_dims_t *filter_dims,
+                                         const data_dims_t *output_dims,
+                                         const conv_params_t *conv_params)
 {
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t in_ch = input_dims->channels;
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+    const uint16_t out_ch = output_dims->channels;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+
     int filter_size = filter_wd * filter_ht * in_ch * out_ch;
     int input_size = input_wd * input_ht * in_ch;
-    int transpose_buf_size = 8 * in_ch; /* to store intermediate data */
+
+    int transpose_buf_size = 2 * (8 * in_ch); /* to store intermediate data */
+    if (input_wd * input_ht < 8) {
+        transpose_buf_size = 0; // not using this for leftover
+    }
     int align_buf_size = 32; /* extra buffer for alignment */
-    return 2 * (filter_size + input_size +  transpose_buf_size) + align_buf_size;
+    if (in_ch % 8 == 0 && filter_wd == 1 && filter_ht == 1 &&
+            pad_wd == 0 && pad_ht == 0 && stride_wd == 1 && stride_ht == 1) {
+        return filter_size + transpose_buf_size + align_buf_size;
+    }
+    return 2 * (filter_size + input_size) +  transpose_buf_size + align_buf_size;
 }
 
 void esp_nn_set_conv_scratch_buf_esp32s3(void *buf)
@@ -351,29 +374,35 @@ void esp_nn_set_conv_scratch_buf_esp32s3(void *buf)
     scratch_buffer = (int16_t *) buf;
 }
 
-void esp_nn_conv_s8_esp32s3(const int8_t *input,
-                            const uint16_t input_wd,
-                            const uint16_t input_ht,
-                            const uint16_t channels,
-                            const int32_t input_offset,
-                            const uint16_t pad_wd,
-                            const uint16_t pad_ht,
-                            const uint16_t stride_wd,
-                            const uint16_t stride_ht,
+void esp_nn_conv_s8_esp32s3(const data_dims_t *input_dims,
+                            const int8_t *input,
+                            const data_dims_t *filter_dims,
                             const int8_t *filter_data,
-                            const uint16_t filter_wd,
-                            const uint16_t filter_ht,
                             const int32_t *bias,
+                            const data_dims_t *output_dims,
                             int8_t *out_data,
-                            const uint16_t out_wd,
-                            const uint16_t out_ht,
-                            const uint16_t out_channels,
-                            const int32_t out_offset,
-                            const int32_t *out_shift,
-                            const int32_t *out_mult,
-                            const int32_t activation_min,
-                            const int32_t activation_max)
+                            const conv_params_t *conv_params,
+                            const quant_data_t *quant_data)
 {
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t channels = input_dims->channels;
+    const int32_t input_offset = conv_params->in_offset;
+    const int32_t out_offset = conv_params->out_offset;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const uint16_t out_channels = output_dims->channels;
+    const int32_t *out_shift = quant_data->shift;
+    const int32_t *out_mult = quant_data->mult;
+    const int32_t activation_min = conv_params->activation.min;
+    const int32_t activation_max = conv_params->activation.max;
+
     int filter_size = filter_wd * filter_ht * channels * out_channels;
     int input_size = input_wd * input_ht * channels;
     int align_len = 16 - (filter_size & 15);
@@ -387,15 +416,16 @@ void esp_nn_conv_s8_esp32s3(const int8_t *input,
 
     if (channels % 8 == 0 && filter_wd == 1 && filter_ht == 1 &&
             pad_wd == 0 && pad_ht == 0 && stride_wd == 1 && stride_ht == 1) {
-        int scratch_offset = (int) (filter_data16 + filter_size);
+        int8_t *filter_aligned = (int8_t *) scratch_buffer;
+        int scratch_offset = (int) (filter_aligned + filter_size);
         void *scratch_buf = (void *) (scratch_offset + 16 - (scratch_offset & 15));
-        esp_nn_s8_to_s16_esp32s3(filter_data, filter_data16, filter_size);
-        esp_nn_conv_s16_mult8_1x1_esp32s3(
-            input, input_wd, input_ht, channels, input_offset, filter_data16,
+        memcpy(filter_aligned, filter_data, filter_size); // copy to aligned address
+        esp_nn_conv_s8_mult8_1x1_esp32s3(
+            input, input_wd, input_ht, channels, input_offset, filter_aligned,
             bias, out_data, out_wd, out_ht, out_channels, out_offset,
             out_shift, out_mult, activation_min, activation_max, scratch_buf);
     } else if (channels % 4 == 0 && filter_wd == 1 && filter_ht == 1 &&
-            (input_wd * input_ht) % 16 == 0 && /* TODO: remove this check */
+            (input_wd * input_ht) % 4 == 0 && /* TODO: remove this check */
             pad_wd == 0 && pad_ht == 0 && stride_wd == 1 && stride_ht == 1) {
         int scratch_offset = (int) (input_data16 + input_size);
         void *scratch_buf = (void *) (scratch_offset + 16 - (scratch_offset & 15));
@@ -427,10 +457,7 @@ void esp_nn_conv_s8_esp32s3(const int8_t *input,
         }
     } else {
         /* Basic unrolled version */
-        esp_nn_conv_s8_unrolled(input, input_wd, input_ht, channels, input_offset,
-                                pad_wd, pad_ht, stride_wd, stride_ht,
-                                filter_data, filter_wd, filter_ht, bias,
-                                out_data, out_wd, out_ht, out_channels, out_offset, out_shift,
-                                out_mult, activation_min, activation_max);
+        esp_nn_conv_s8_unrolled(input_dims, input, filter_dims, filter_data,
+                                bias, output_dims, out_data, conv_params, quant_data);
     }
 }
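
Note: with this refactor the S3 kernel receives geometry, stride/padding/activation and the per-channel quantization arrays through the descriptor structs from esp_nn_defs.h and picks a fast path (8-channel 1x1, 4-channel 1x1, the 3x3 paths, or the unrolled fallback) from them. A minimal caller-side sketch follows, mirroring the updated test code; the dimensions and offsets are illustrative only, esp_nn.h is assumed to expose the _esp32s3 prototypes, and the test code additionally sets a fourth data_dims_t member to 1, which is simply left at zero here.

#include <stdint.h>
#include <stdlib.h>
#include <esp_nn.h>            /* assumed to pull in esp_nn_defs.h and the S3 prototypes */

void conv_call_sketch(const int8_t *input, const int8_t *filter, const int32_t *bias,
                      int8_t *output, int32_t *out_shift, int32_t *out_mult)
{
    data_dims_t in_dims = {0}, filt_dims = {0}, out_dims = {0};
    in_dims.width = 16;   in_dims.height = 16;   in_dims.channels = 8;   /* illustrative */
    filt_dims.width = 1;  filt_dims.height = 1;
    out_dims.width = 16;  out_dims.height = 16;  out_dims.channels = 16;

    conv_params_t params = {0};
    params.in_offset = 128;        params.out_offset = -128;
    params.stride.width = 1;       params.stride.height = 1;
    params.padding.width = 0;      params.padding.height = 0;
    params.activation.min = -128;  params.activation.max = 127;

    quant_data_t quant = {0};
    quant.shift = out_shift;       /* per-output-channel shift/multiplier arrays */
    quant.mult  = out_mult;

    int scratch = esp_nn_get_conv_scratch_size_esp32s3(&in_dims, &filt_dims, &out_dims, &params);
    void *buf = malloc(scratch + 32);                  /* headroom for 16-byte alignment */
    esp_nn_set_conv_scratch_buf_esp32s3(buf);

    esp_nn_conv_s8_esp32s3(&in_dims, input, &filt_dims, filter, bias,
                           &out_dims, output, &params, &quant);
    free(buf);
}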

+ 179 - 0
code/components/esp-nn/src/convolution/esp_nn_conv_opt.c

@@ -0,0 +1,179 @@
+// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <esp_nn_defs.h>
+
+#include <common_functions.h>
+
+int esp_nn_get_conv_scratch_size_opt(const data_dims_t *input_dims,
+                                     const data_dims_t *filter_dims,
+                                     const data_dims_t *output_dims,
+                                     const conv_params_t *conv_params)
+{
+    return 0;
+}
+
+void esp_nn_set_conv_scratch_buf_opt(const void *buf)
+{
+
+}
+
+__attribute__ ((noinline))
+static void esp_nn_conv_s8_1x1(const data_dims_t *input_dims,
+                               const int8_t *input_data,
+                               const int8_t *filter_data,
+                               const int32_t *bias,
+                               const data_dims_t *output_dims,
+                               int8_t *out_data,
+                               const conv_params_t *conv_params,
+                               const quant_data_t *quant_data)
+{
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t in_channels = input_dims->channels;
+    const int32_t input_offset = conv_params->in_offset;
+    const int32_t out_offset = conv_params->out_offset;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const uint16_t out_channels = output_dims->channels;
+    const int32_t activation_min = conv_params->activation.min;
+    const int32_t activation_max = conv_params->activation.max;
+
+    for (int32_t in_row = 0; in_row < out_ht * stride_ht; in_row += stride_ht) {
+        for (int32_t in_col = 0; in_col < out_wd * stride_wd; in_col += stride_wd) {
+            const int32_t *out_mult = quant_data->mult;
+            const int32_t *out_shift = quant_data->shift;
+            const int8_t *filter_ptr = filter_data;
+            const int8_t *input_base_ptr = input_data + (in_row * input_wd + in_col) * in_channels;
+            int32_t out_ch_idx = 0;
+            for (; out_ch_idx < out_channels; out_ch_idx++) {
+                int32_t conv_out = 0;
+
+                const int8_t *input_ptr = input_base_ptr;
+
+                int32_t in_ch_idx = 0;
+                for (; in_ch_idx < in_channels - 3; in_ch_idx += 4) {
+                    conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                    conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                    conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                    conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                }
+                for (; in_ch_idx < in_channels; in_ch_idx ++) {
+                    conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                }
+                if (bias) {
+                    conv_out += bias[out_ch_idx];
+                }
+                conv_out = esp_nn_multiply_by_quantized_mult_fast(conv_out, *out_mult++, *out_shift++);
+                conv_out += out_offset;
+                conv_out = max(conv_out, activation_min);
+                conv_out = min(conv_out, activation_max);
+                *out_data++ = (int8_t) conv_out;
+            }
+        }
+    }
+}
+
+/**
+ * Assumption 1: i/p channels == o/p channels
+ * Assumption 2: Pointers are valid
+ * Assumption 3: dilation width = 1
+ */
+void esp_nn_conv_s8_opt(const data_dims_t *input_dims,
+                        const int8_t *input_data,
+                        const data_dims_t *filter_dims,
+                        const int8_t *filter_data,
+                        const int32_t *bias,
+                        const data_dims_t *output_dims,
+                        int8_t *out_data,
+                        const conv_params_t *conv_params,
+                        const quant_data_t *quant_data)
+{
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+
+    if (filter_wd == 1 && filter_ht == 1) {
+        esp_nn_conv_s8_1x1(input_dims, input_data, filter_data, bias,
+                           output_dims, out_data, conv_params, quant_data);
+        return;
+    }
+
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t in_channels = input_dims->channels;
+    const int32_t input_offset = conv_params->in_offset;
+    const int32_t out_offset = conv_params->out_offset;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const uint16_t out_channels = output_dims->channels;
+    const int32_t activation_min = conv_params->activation.min;
+    const int32_t activation_max = conv_params->activation.max;
+
+    int32_t out_ch_idx, out_y, out_x, filter_y_idx, filter_x_idx;
+
+    for (out_y = 0; out_y < out_ht; out_y++) {
+        for (out_x = 0; out_x < out_wd; out_x++) {
+            const int32_t *out_shift = quant_data->shift;
+            const int32_t *out_mult = quant_data->mult;
+            for (out_ch_idx = 0; out_ch_idx < out_channels; out_ch_idx++) {
+                int32_t conv_out = 0;
+
+                const int32_t base_y = stride_ht * out_y - pad_ht;
+                const int32_t base_x = stride_wd * out_x - pad_wd;
+
+                const int32_t filter_y_start = max(0, -base_y);
+                const int32_t filter_x_start = max(0, -base_x);
+
+                const int32_t filter_y_end = min(filter_ht, input_ht - base_y);
+                const int32_t filter_x_end = min(filter_wd, input_wd - base_x);
+
+                for (filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) {
+                    for (filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) {
+                        const int32_t in_row = base_y + filter_y_idx;
+                        const int32_t in_col = base_x + filter_x_idx;
+
+                        const int8_t *input_ptr = input_data +
+                                        (in_row * input_wd + in_col) * in_channels;
+                        const int8_t *filter_ptr = filter_data +
+                                        out_ch_idx * in_channels * filter_ht * filter_wd +
+                                        (filter_y_idx * filter_wd + filter_x_idx) * in_channels;
+                        int32_t in_ch_idx = 0;
+                        for (; in_ch_idx < in_channels - 3; in_ch_idx += 4) {
+                            conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                            conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                            conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                            conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                        }
+                        for (; in_ch_idx < in_channels; in_ch_idx ++) {
+                            conv_out += (*input_ptr++ + input_offset) * *filter_ptr++;
+                        }
+                    }
+                }
+                if (bias) {
+                    conv_out += bias[out_ch_idx];
+                }
+                conv_out = esp_nn_multiply_by_quantized_mult_fast(conv_out, *out_mult++, *out_shift++);
+                conv_out += out_offset;
+                conv_out = max(conv_out, activation_min);
+                conv_out = min(conv_out, activation_max);
+                *out_data++ = (int8_t) conv_out;
+            }
+        }
+    }
+}
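
Note: the portable "opt" kernel above gets its speed purely from unrolling the channel dot product by four and draining the remainder with a scalar loop; the 1x1 case is additionally split into its own noinline function so pointwise layers skip the padding/window clipping entirely. A self-contained sketch of the unroll idiom (not part of the library API; the function name is made up):

#include <stdint.h>

static int32_t dot_s8_unrolled(const int8_t *in, const int8_t *filt,
                               int32_t in_channels, int32_t input_offset)
{
    int32_t acc = 0;
    int32_t i = 0;
    for (; i < in_channels - 3; i += 4) {       /* four multiply-accumulates per iteration */
        acc += (in[i + 0] + input_offset) * filt[i + 0];
        acc += (in[i + 1] + input_offset) * filt[i + 1];
        acc += (in[i + 2] + input_offset) * filt[i + 2];
        acc += (in[i + 3] + input_offset) * filt[i + 3];
    }
    for (; i < in_channels; i++) {              /* scalar tail for in_channels % 4 != 0 */
        acc += (in[i] + input_offset) * filt[i];
    }
    return acc;
}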

+ 30 - 27
code/components/esp-nn/src/convolution/esp_nn_depthwise_conv_ansi.c

@@ -12,16 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <stdint.h>
-
+#include <esp_nn_defs.h>
 #include <common_functions.h>
 
-int esp_nn_get_depthwise_conv_scratch_size_ansi(const uint16_t input_wd,
-                                                const uint16_t input_ht,
-                                                const uint16_t channels,
-                                                const uint16_t ch_mult,
-                                                const uint16_t filter_wd,
-                                                const uint16_t filter_ht)
+int esp_nn_get_depthwise_conv_scratch_size_ansi(const data_dims_t *input_dims,
+                                                const data_dims_t *filter_dims,
+                                                const data_dims_t *output_dims,
+                                                const dw_conv_params_t *conv_params)
 {
     return 0;
 }
@@ -31,29 +28,35 @@ void esp_nn_set_depthwise_conv_scratch_buf_ansi(const void *buf)
 
 }
 
-void esp_nn_depthwise_conv_s8_ansi(const int8_t *input_data,
-                                   const uint16_t input_wd,
-                                   const uint16_t input_ht,
-                                   const uint16_t channels,
-                                   const int32_t input_offset,
-                                   const uint16_t pad_wd,
-                                   const uint16_t pad_ht,
-                                   const uint16_t stride_wd,
-                                   const uint16_t stride_ht,
-                                   const uint16_t ch_mult,
+void esp_nn_depthwise_conv_s8_ansi(const data_dims_t *input_dims,
+                                   const int8_t *input_data,
+                                   const data_dims_t *filter_dims,
                                    const int8_t *filter_data,
-                                   const uint16_t filter_wd,
-                                   const uint16_t filter_ht,
                                    const int32_t *bias,
+                                   const data_dims_t *output_dims,
                                    int8_t *out_data,
-                                   const uint16_t out_wd,
-                                   const uint16_t out_ht,
-                                   const int32_t out_offset,
-                                   const int32_t *out_shift,
-                                   const int32_t *out_mult,
-                                   const int32_t activation_min,
-                                   const int32_t activation_max)
+                                   const dw_conv_params_t *conv_params,
+                                   const quant_data_t *quant_data)
 {
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t channels = input_dims->channels;
+    const int32_t input_offset = conv_params->in_offset;
+    const int32_t out_offset = conv_params->out_offset;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const int32_t *out_shift = quant_data->shift;
+    const int32_t *out_mult = quant_data->mult;
+    const int32_t activation_min = conv_params->activation.min;
+    const int32_t activation_max = conv_params->activation.max;
+    const uint16_t ch_mult = conv_params->ch_mult;
+
     int out_idx = 0;
     for (int out_y = 0; out_y < out_ht; out_y++) { //height loop
         const int16_t base_y = (out_y * stride_ht) - pad_ht;

+ 291 - 0
code/components/esp-nn/src/convolution/esp_nn_depthwise_conv_opt.c

@@ -0,0 +1,291 @@
+// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <esp_nn_defs.h>
+#include <common_functions.h>
+
+int esp_nn_get_depthwise_conv_scratch_size_opt(const data_dims_t *input_dims,
+                                               const data_dims_t *filter_dims,
+                                               const data_dims_t *output_dims,
+                                               const dw_conv_params_t *conv_params)
+{
+    return 0;
+}
+
+void esp_nn_set_depthwise_conv_scratch_buf_opt(const void *buf)
+{
+
+}
+
+/* common channel multiplier == 1 case */
+__attribute__ ((noinline))
+static void esp_nn_depthwise_conv_s8_ch_mult_1(const data_dims_t *input_dims,
+                                               const int8_t *input_data,
+                                               const data_dims_t *filter_dims,
+                                               const int8_t *filter_data,
+                                               const int32_t *bias,
+                                               const data_dims_t *output_dims,
+                                               int8_t *out_data,
+                                               const dw_conv_params_t *conv_params,
+                                               const quant_data_t *quant_data)
+{
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t channels = input_dims->channels;
+    const int32_t input_offset = conv_params->in_offset;
+    const int32_t out_offset = conv_params->out_offset;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const int32_t activation_min = conv_params->activation.min;
+    const int32_t activation_max = conv_params->activation.max;
+
+    int out_idx = 0;
+    for (int out_y = 0; out_y < out_ht; out_y++) { //height loop
+        const int16_t base_y = (out_y * stride_ht) - pad_ht;
+        for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop
+            const int16_t base_x = (out_x * stride_wd) - pad_wd;
+
+            const int32_t *out_shift = quant_data->shift;
+            const int32_t *out_mult = quant_data->mult;
+
+            /* Restrict the filter window so that no tap lies outside the input block */
+            int filter_y_start = max(0, -base_y);
+            int filter_x_start = max(0, -base_x);
+            int filter_y_end = min(filter_ht, input_ht - base_y);
+            int filter_x_end = min(filter_wd, input_wd - base_x);
+
+            int ch_idx = 0;
+            for (; ch_idx < channels - 3; ch_idx += 4) {//channel_loop
+                int32_t result0 = 0;
+                int32_t result1 = 0;
+                int32_t result2 = 0;
+                int32_t result3 = 0;
+
+                for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) {
+                    const int32_t idx_y = base_y + filter_y_idx;
+                    for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) {
+                        const int32_t idx_x = base_x + filter_x_idx;
+                        int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx;
+                        int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels) + ch_idx;
+                        int32_t input_val0 = input_data[input_index + 0] + input_offset;
+                        int32_t input_val1 = input_data[input_index + 1] + input_offset;
+                        int32_t input_val2 = input_data[input_index + 2] + input_offset;
+                        int32_t input_val3 = input_data[input_index + 3] + input_offset;
+                        int32_t filter_val0 = filter_data[filter_index + 0];
+                        int32_t filter_val1 = filter_data[filter_index + 1];
+                        int32_t filter_val2 = filter_data[filter_index + 2];
+                        int32_t filter_val3 = filter_data[filter_index + 3];
+                        result0 += input_val0 * filter_val0;
+                        result1 += input_val1 * filter_val1;
+                        result2 += input_val2 * filter_val2;
+                        result3 += input_val3 * filter_val3;
+                    }
+                }
+                if (bias) {
+                    result0 += bias[ch_idx + 0];
+                    result1 += bias[ch_idx + 1];
+                    result2 += bias[ch_idx + 2];
+                    result3 += bias[ch_idx + 3];
+                }
+                result0 = esp_nn_multiply_by_quantized_mult_fast(result0, *out_mult++, *out_shift++);
+                result1 = esp_nn_multiply_by_quantized_mult_fast(result1, *out_mult++, *out_shift++);
+                result2 = esp_nn_multiply_by_quantized_mult_fast(result2, *out_mult++, *out_shift++);
+                result3 = esp_nn_multiply_by_quantized_mult_fast(result3, *out_mult++, *out_shift++);
+
+                result0 += out_offset;
+                result1 += out_offset;
+                result2 += out_offset;
+                result3 += out_offset;
+
+                result0 = max(result0, activation_min);
+                result1 = max(result1, activation_min);
+                result2 = max(result2, activation_min);
+                result3 = max(result3, activation_min);
+
+                result0 = min(result0, activation_max);
+                result1 = min(result1, activation_max);
+                result2 = min(result2, activation_max);
+                result3 = min(result3, activation_max);
+
+                out_data[out_idx++] = result0;
+                out_data[out_idx++] = result1;
+                out_data[out_idx++] = result2;
+                out_data[out_idx++] = result3;
+            }
+            for (; ch_idx < channels; ch_idx++) {//channel_loop
+                int32_t result = 0;
+
+                for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) {
+                    const int32_t idx_y = base_y + filter_y_idx;
+                    for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) {
+                        const int32_t idx_x = base_x + filter_x_idx;
+                        int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx;
+                        int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels) + ch_idx;
+                        int32_t input_val = input_data[input_index] + input_offset;
+                        int32_t filter_val = filter_data[filter_index];
+                        result += input_val * filter_val;
+                    }
+                }
+                if (bias) {
+                    result += bias[ch_idx];
+                }
+                result = esp_nn_multiply_by_quantized_mult_fast(result, *out_mult++, *out_shift++);
+                result += out_offset;
+                result = max(result, activation_min);
+                result = min(result, activation_max);
+
+                out_data[out_idx++] = result;
+            }
+        }
+    }
+}
+
+void esp_nn_depthwise_conv_s8_opt(const data_dims_t *input_dims,
+                                  const int8_t *input_data,
+                                  const data_dims_t *filter_dims,
+                                  const int8_t *filter_data,
+                                  const int32_t *bias,
+                                  const data_dims_t *output_dims,
+                                  int8_t *out_data,
+                                  const dw_conv_params_t *conv_params,
+                                  const quant_data_t *quant_data)
+{
+    const uint16_t ch_mult = conv_params->ch_mult;
+    if (ch_mult == 1) {
+        esp_nn_depthwise_conv_s8_ch_mult_1(input_dims, input_data, filter_dims, filter_data,
+                                           bias, output_dims, out_data, conv_params, quant_data);
+        return;
+    }
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t channels = input_dims->channels;
+    const int32_t input_offset = conv_params->in_offset;
+    const int32_t out_offset = conv_params->out_offset;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const int32_t activation_min = conv_params->activation.min;
+    const int32_t activation_max = conv_params->activation.max;
+
+    int out_idx = 0;
+    for (int out_y = 0; out_y < out_ht; out_y++) { //height loop
+        const int16_t base_y = (out_y * stride_ht) - pad_ht;
+        for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop
+            const int16_t base_x = (out_x * stride_wd) - pad_wd;
+
+            const int32_t *out_shift = quant_data->shift;
+            const int32_t *out_mult = quant_data->mult;
+
+            /* Restrict the filter window so that no tap lies outside the input block */
+            int filter_y_start = max(0, -base_y);
+            int filter_x_start = max(0, -base_x);
+            int filter_y_end = min(filter_ht, input_ht - base_y);
+            int filter_x_end = min(filter_wd, input_wd - base_x);
+
+            for (int ch_idx = 0; ch_idx < channels; ch_idx++) {//channel_loop
+                int ch_mult_idx = 0;
+                for (; ch_mult_idx < ch_mult - 3; ch_mult_idx += 4) {
+                    int32_t result0 = 0;
+                    int32_t result1 = 0;
+                    int32_t result2 = 0;
+                    int32_t result3 = 0;
+                    const int out_ch_idx =  ch_idx * ch_mult + ch_mult_idx;
+
+                    for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) {
+                        const int32_t idx_y = base_y + filter_y_idx;
+                        for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) {
+                            const int32_t idx_x = base_x + filter_x_idx;
+                            int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx;
+                            int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels * ch_mult) + out_ch_idx;
+                            int32_t input_val = input_data[input_index] + input_offset;
+                            int32_t filter_val0 = filter_data[filter_index + 0];
+                            int32_t filter_val1 = filter_data[filter_index + 1];
+                            int32_t filter_val2 = filter_data[filter_index + 2];
+                            int32_t filter_val3 = filter_data[filter_index + 3];
+                            result0 += input_val * filter_val0;
+                            result1 += input_val * filter_val1;
+                            result2 += input_val * filter_val2;
+                            result3 += input_val * filter_val3;
+                        }
+                    }
+                    if (bias) {
+                        result0 += bias[out_ch_idx + 0];
+                        result1 += bias[out_ch_idx + 1];
+                        result2 += bias[out_ch_idx + 2];
+                        result3 += bias[out_ch_idx + 3];
+                    }
+                    result0 = esp_nn_multiply_by_quantized_mult_fast(result0, *out_mult++, *out_shift++);
+                    result1 = esp_nn_multiply_by_quantized_mult_fast(result1, *out_mult++, *out_shift++);
+                    result2 = esp_nn_multiply_by_quantized_mult_fast(result2, *out_mult++, *out_shift++);
+                    result3 = esp_nn_multiply_by_quantized_mult_fast(result3, *out_mult++, *out_shift++);
+
+                    result0 += out_offset;
+                    result1 += out_offset;
+                    result2 += out_offset;
+                    result3 += out_offset;
+
+                    result0 = max(result0, activation_min);
+                    result1 = max(result1, activation_min);
+                    result2 = max(result2, activation_min);
+                    result3 = max(result3, activation_min);
+                    result0 = min(result0, activation_max);
+                    result1 = min(result1, activation_max);
+                    result2 = min(result2, activation_max);
+                    result3 = min(result3, activation_max);
+
+                    out_data[out_idx++] = result0;
+                    out_data[out_idx++] = result1;
+                    out_data[out_idx++] = result2;
+                    out_data[out_idx++] = result3;
+                }
+                for (; ch_mult_idx < ch_mult; ch_mult_idx++) {
+                    int32_t result = 0;
+                    const int out_ch_idx =  ch_idx * ch_mult + ch_mult_idx;
+
+                    for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) {
+                        const int32_t idx_y = base_y + filter_y_idx;
+                        for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) {
+                            const int32_t idx_x = base_x + filter_x_idx;
+                            int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx;
+                            int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels * ch_mult) + out_ch_idx;
+                            int32_t input_val = input_data[input_index] + input_offset;
+                            int32_t filter_val = filter_data[filter_index];
+                            result += input_val * filter_val;
+                        }
+                    }
+                    if (bias) {
+                        result += bias[out_ch_idx];
+                    }
+                    result = esp_nn_multiply_by_quantized_mult_fast(result, *out_mult++, *out_shift++);
+                    result += out_offset;
+                    result = max(result, activation_min);
+                    result = min(result, activation_max);
+
+                    out_data[out_idx++] = result;
+                }
+            }
+        }
+    }
+}
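
Note: in the generic depthwise kernel the filter is laid out as [filter_y][filter_x][channels * ch_mult], so every output channel index is out_ch_idx = ch_idx * ch_mult + ch_mult_idx, and the ch_mult == 1 specialisation above exists mainly to drop that extra multiplication from the inner loop. A small sketch of the index math with made-up numbers (channels = 8, ch_mult = 2, 3x3 filter); the helper name is hypothetical:

#include <stdint.h>

/* For input channel 5, multiplier lane 1: out_ch_idx = 5 * 2 + 1 = 11, and the tap at
 * (filter_y, filter_x) sits at (filter_y * 3 + filter_x) * (8 * 2) + 11 in the flat array. */
static int32_t dw_filter_index(int32_t filter_y, int32_t filter_x, int32_t filter_wd,
                               int32_t channels, int32_t ch_mult,
                               int32_t ch_idx, int32_t ch_mult_idx)
{
    const int32_t out_ch_idx = ch_idx * ch_mult + ch_mult_idx;
    return (filter_y * filter_wd + filter_x) * (channels * ch_mult) + out_ch_idx;
}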

+ 97 - 37
code/components/esp-nn/src/convolution/esp_nn_depthwise_conv_s8_esp32s3.c

@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <stdint.h>
 #include <stdio.h>
+#include <esp_nn_defs.h>
 
 #include <common_functions.h>
 
@@ -353,17 +353,59 @@ void esp_nn_depthwise_conv_s8_ch_mult1(const int8_t *input_data,
     }
 }
 
-int esp_nn_get_depthwise_conv_scratch_size_esp32s3(const uint16_t input_wd,
-                                                   const uint16_t input_ht,
-                                                   const uint16_t channels,
-                                                   const uint16_t ch_mult,
-                                                   const uint16_t filter_wd,
-                                                   const uint16_t filter_ht)
+int esp_nn_get_depthwise_conv_scratch_size_esp32s3(const data_dims_t *input_dims,
+                                                   const data_dims_t *filter_dims,
+                                                   const data_dims_t *output_dims,
+                                                   const dw_conv_params_t *conv_params)
 {
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t channels = input_dims->channels;
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+    const uint16_t ch_mult = conv_params->ch_mult;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+
     int filter_size = filter_wd * filter_ht * channels * ch_mult;
-    int padding_used = ((filter_wd == 3) && (filter_ht == 3)) * 2;
-    int input_size = (input_wd + padding_used) * (input_ht + padding_used) * channels;
-    return  2 * (filter_size + input_size) + 16; //16 for alignment
+    int pad_width = 0, pad_height = 0;
+
+    if ((ch_mult == 1) && (channels % 8 == 0) && (filter_wd == 3) && (filter_ht == 3)) {
+        if (channels % 16 == 0) {
+            if (pad_wd || pad_ht) {
+                pad_width = pad_wd * 2;
+                pad_height = pad_ht * 2;
+            } else {
+                // check if we need to pad additionally
+                pad_width = (out_wd * stride_wd + filter_wd - 1) - input_wd;
+                pad_height = (out_ht * stride_ht + filter_ht - 1) - input_ht;
+                // printf("in(%d %d %d), out(%d %d), filter (%d %d) stride (%d %d), pad (%d %d)",
+                //         input_wd, input_ht, channels, out_wd, out_ht, filter_wd, filter_ht,
+                //         stride_wd, stride_ht, pad_wd, pad_ht);
+            }
+            if (pad_width || pad_height) {
+                int input_size = (input_wd + pad_width) * (input_ht + pad_height) * channels;
+                // printf("ask1 %d\n", filter_size + input_size + 16);
+                return filter_size + input_size + 16;  // 16 for alignment
+            } else {
+                // printf("ask2 %d\n", filter_size + 16);
+                return filter_size + 16;  // 16 for alignment
+            }
+        } else {
+            int input_size = input_wd * input_ht * channels;
+            // printf("ask3 %d\n", 2 * (filter_size + input_size) + 16);
+            return  2 * (filter_size + input_size) + 16; // 16 for alignment
+        }
+    } else if (ch_mult % 4 == 0) {
+        int input_size = input_wd * input_ht * channels;
+        // printf("ask4 %d\n", 2 * (filter_size + input_size) + 16);
+        return  2 * (filter_size + input_size) + 16; // 16 for alignment
+    }
+    return 32; // just few bytes
 }
 
 void esp_nn_set_depthwise_conv_scratch_buf_esp32s3(void *buf)
@@ -376,29 +418,38 @@ void esp_nn_set_depthwise_conv_scratch_buf_esp32s3(void *buf)
  * Assumption 2: Pointers are valid
  * Assumption 3: dilation width = 1
  */
-void esp_nn_depthwise_conv_s8_esp32s3(const int8_t *input_data,
-                                      const uint16_t input_wd,
-                                      const uint16_t input_ht,
-                                      const uint16_t channels,
-                                      const int32_t input_offset,
-                                      const uint16_t pad_wd,
-                                      const uint16_t pad_ht,
-                                      const uint16_t stride_wd,
-                                      const uint16_t stride_ht,
-                                      const uint16_t ch_mult,
+
+
+
+void esp_nn_depthwise_conv_s8_esp32s3(const data_dims_t *input_dims,
+                                      const int8_t *input_data,
+                                      const data_dims_t *filter_dims,
                                       const int8_t *filter_data,
-                                      const uint16_t filter_wd,
-                                      const uint16_t filter_ht,
                                       const int32_t *bias,
+                                      const data_dims_t *output_dims,
                                       int8_t *out_data,
-                                      const uint16_t out_wd,
-                                      const uint16_t out_ht,
-                                      const int32_t out_offset,
-                                      const int32_t *out_shift,
-                                      const int32_t *out_mult,
-                                      const int32_t activation_min,
-                                      const int32_t activation_max)
+                                      const dw_conv_params_t *conv_params,
+                                      const quant_data_t *quant_data)
 {
+    const uint16_t input_wd = input_dims->width;
+    const uint16_t input_ht = input_dims->height;
+    const uint16_t channels = input_dims->channels;
+    const int32_t input_offset = conv_params->in_offset;
+    const int32_t out_offset = conv_params->out_offset;
+    const uint16_t pad_wd = conv_params->padding.width;
+    const uint16_t pad_ht = conv_params->padding.height;
+    const uint16_t stride_wd = conv_params->stride.width;
+    const uint16_t stride_ht = conv_params->stride.height;
+    const uint16_t filter_wd = filter_dims->width;
+    const uint16_t filter_ht = filter_dims->height;
+    const uint16_t out_wd = output_dims->width;
+    const uint16_t out_ht = output_dims->height;
+    const int32_t *out_shift = quant_data->shift;
+    const int32_t *out_mult = quant_data->mult;
+    const int32_t activation_min = conv_params->activation.min;
+    const int32_t activation_max = conv_params->activation.max;
+    const uint16_t ch_mult = conv_params->ch_mult;
+
     int filter_size = filter_wd * filter_ht * channels * ch_mult;
     int align_len = 16 - (filter_size & 15);
     int input_size = input_wd * input_ht * channels;
@@ -423,18 +474,27 @@ void esp_nn_depthwise_conv_s8_esp32s3(const int8_t *input_data,
                                                                   stride_wd, stride_ht, filter_aligned, bias,
                                                                   out_data, out_wd, out_ht, out_offset, out_shift,
                                                                   out_mult, activation_min, activation_max);
-            } else if ((pad_wd == 0) && (pad_ht == 0) &&
-                    // because this does not handle padding offset cases yet, run just for stride (1, 1).
-                    // end padding of input with `-input_offset` should solve this
-                    (stride_wd == 1) && (stride_ht == 1)) {
+            } else if ((channels % 16 == 0) && (pad_wd == 0) && (pad_ht == 0)) {
                 /* process in 8 bits */
                 int8_t *filter_aligned = (int8_t *) scratch_buffer;
+                int8_t *input_padded = (int8_t *) scratch_buffer + filter_size + align_len;
+
+                // check if we need to pad additionally
+                int pad_right = (out_wd * stride_wd + filter_wd - 1) - input_wd;
+                int pad_bottom = (out_ht * stride_ht + filter_ht - 1) - input_ht;
+                if (pad_right || pad_bottom) { // pad right and bottom
+                    esp_nn_aligned_s8_pad_end_with_value(input_data, input_padded, input_wd, input_ht,
+                                                         channels, -input_offset, pad_right, pad_bottom);
+                } else {
+                    input_padded = (int8_t *) input_data;
+                }
                 memcpy(filter_aligned, filter_data, filter_size);
-                esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3(input_data, input_wd, input_ht, channels, input_offset,
-                                                                  stride_wd, stride_ht, filter_aligned,
-                                                                  bias, out_data, out_wd, out_ht, out_offset, out_shift,
+                esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3(input_padded, input_wd + pad_right,
+                                                                  input_ht + pad_bottom, channels, input_offset,
+                                                                  stride_wd, stride_ht, filter_aligned, bias,
+                                                                  out_data, out_wd, out_ht, out_offset, out_shift,
                                                                   out_mult, activation_min, activation_max);
-            } else { /* (channels % 8) == 0 && pad_wd == 1 && pad_ht == 1 */
+            } else { /* (channels % 8) == 0 */
                 esp_nn_s8_to_s16_esp32s3(filter_data, filter_data16, filter_size);
                 esp_nn_aligned_s8_to_s16_with_offset_esp32s3(input_data, input_data16, input_size, input_offset);
                 esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3(input_data16, input_wd, input_ht, channels,
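
Note: the S3 depthwise path may now extend the input on the right and bottom instead of falling back to the slow kernel; the amount is derived from the requested output size and the padded copy is filled with -input_offset. A sketch of that calculation with the numbers from the new depthwise test case 9 added below (input 6x6, 3x3 filter, stride 2, requested output 3x3); the helper name is made up:

static void end_padding_sketch(int input_wd, int input_ht, int filter_wd, int filter_ht,
                               int stride_wd, int stride_ht, int out_wd, int out_ht,
                               int *pad_right, int *pad_bottom)
{
    /* with the example numbers: (3*2 + 3 - 1) - 6 = 2 on each edge, so the input is
     * padded to 8x8 before the 3x3 kernel runs */
    *pad_right  = (out_wd * stride_wd + filter_wd - 1) - input_wd;
    *pad_bottom = (out_ht * stride_ht + filter_ht - 1) - input_ht;
}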

+ 8 - 0
code/components/esp-nn/test_app/sdkconfig.defaults.esp32s3

@@ -0,0 +1,8 @@
+# Default configurations for ESP32-S3
+
+CONFIG_ESP32S3_DEFAULT_CPU_FREQ_240=y
+CONFIG_ESP32S3_SPIRAM_SUPPORT=y
+
+CONFIG_ESP32S3_DATA_CACHE_64KB=y
+CONFIG_ESP32S3_DATA_CACHE_8WAYS=y
+CONFIG_ESP32S3_DATA_CACHE_LINE_64B=y
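
Note: these defaults matter for the test sources below, which now only define IDF_HEAP_CAPS (and thus place their large buffers in PSRAM via heap_caps_malloc) when SPIRAM plus one of its allocation options is enabled. A condensed sketch of that gate, assuming the usual ESP-IDF sdkconfig.h; the internal-RAM fallback branch is not shown in this diff:

#include "sdkconfig.h"
#if (CONFIG_SPIRAM_SUPPORT && (CONFIG_SPIRAM_USE_CAPS_ALLOC || CONFIG_SPIRAM_USE_MALLOC))
#define IDF_HEAP_CAPS 1
#include "esp_heap_caps.h"
#endif

static void *test_alloc(int size)
{
#if IDF_HEAP_CAPS
    return heap_caps_malloc(size + 32, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);  /* PSRAM */
#else
    return 0;   /* internal-RAM fallback elided here */
#endif
}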

+ 16 - 4
code/components/esp-nn/tests/src/basic_math_test.c

@@ -23,7 +23,9 @@
 #include "test_utils.h"
 
 #if CONFIG_IDF_CMAKE
+#if (CONFIG_SPIRAM_SUPPORT && (CONFIG_SPIRAM_USE_CAPS_ALLOC || CONFIG_SPIRAM_USE_MALLOC))
 #define IDF_HEAP_CAPS 1
+#endif
 
 #if IDF_HEAP_CAPS
 #include "esp_heap_caps.h"
@@ -138,6 +140,11 @@ void esp_nn_add_elementwise_s8_test()
         out_c_orig = out_data_c;
         out_opt_orig = out_data_opt;
 #endif
+        if (input1_orig == NULL || input2_orig == NULL || out_c_orig == NULL ||
+                out_opt_orig == NULL) {
+            printf(ANSI_COLOR_RED"%s error allocating buffers\n"ANSI_COLOR_RESET, __FUNCTION__);
+            goto elementwise_add_test_cleanup;
+        }
 
         for (int i = 0; i < size; ++i) {
             input1[i] = rand() % 256 - 128;
@@ -194,10 +201,10 @@ elementwise_add_test_cleanup:
         if (input2_orig) {
             free(input2_orig);
         }
-        if (out_data_c) {
+        if (out_c_orig) {
             free(out_c_orig);
         }
-        if (out_data_opt) {
+        if (out_opt_orig) {
             free(out_opt_orig);
         }
     }
@@ -282,6 +289,11 @@ void esp_nn_mul_elementwise_s8_test()
         out_c_orig = out_data_c;
         out_opt_orig = out_data_opt;
 #endif
+        if (input1_orig == NULL || input2_orig == NULL || out_c_orig == NULL ||
+                out_opt_orig == NULL) {
+            printf(ANSI_COLOR_RED"%s error allocating buffers\n"ANSI_COLOR_RESET, __FUNCTION__);
+            goto elementwise_mult_test_cleanup;
+        }
 
         for (int i = 0; i < size; ++i) {
             input1[i] = rand() % 256 - 128;
@@ -333,10 +345,10 @@ elementwise_mult_test_cleanup:
         if (input2_orig) {
             free(input2_orig);
         }
-        if (out_data_c) {
+        if (out_c_orig) {
             free(out_c_orig);
         }
-        if (out_data_opt) {
+        if (out_opt_orig) {
             free(out_opt_orig);
         }
     }
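
Note: two things changed in the cleanup path above: allocation failures are now caught before the buffers are written, and free() is called on the *_orig pointers (the actual allocations) rather than on the working pointers that were shifted before use. A self-contained sketch of that pattern with hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void alloc_pattern_sketch(int size)
{
    int8_t *buf_orig = (int8_t *) malloc(size + 32);   /* pointer to pass to free() */
    int8_t *buf = NULL;                                 /* working pointer */
    if (buf_orig == NULL) {
        printf("error allocating buffers\n");
        goto cleanup;
    }
    buf = buf_orig + 16;        /* working pointer is offset, e.g. to test unaligned input */
    buf[0] = 0;                 /* ... exercise the kernel under test ... */
cleanup:
    if (buf_orig) {
        free(buf_orig);         /* never free the shifted pointer */
    }
}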

+ 70 - 36
code/components/esp-nn/tests/src/convolution_test.c

@@ -22,8 +22,9 @@
 #include "test_utils.h"
 
 #if CONFIG_IDF_CMAKE
+#if (CONFIG_SPIRAM_SUPPORT && (CONFIG_SPIRAM_USE_CAPS_ALLOC || CONFIG_SPIRAM_USE_MALLOC))
 #define IDF_HEAP_CAPS 1
-
+#endif
 #if IDF_HEAP_CAPS
 #include "esp_heap_caps.h"
 #endif
@@ -44,8 +45,8 @@ void esp_nn_depthwise_conv_s8_test()
     uint16_t filter_ht, filter_wd, ch_mult;
     uint16_t pad_wd, pad_ht, stride_wd, stride_ht;
 
-    // run for 10 iterations
-    for (int itr = 0; itr < 10; itr++) {
+    // run for 15 iterations
+    for (int itr = 0; itr < 15; itr++) {
         /* prepare data */
         switch (itr) {
         case 0: // (ch_mult 1, (channels % 16) = 0), filter (3,3), pad (0,0)
@@ -144,22 +145,52 @@ void esp_nn_depthwise_conv_s8_test()
             stride_wd = 2;
             stride_ht = 2;
             break;
+        case 8: // same as case 7, with large parameters
+            input_wd = 58;
+            input_ht = 58;
+            filter_ht = 3;
+            filter_wd = 3;
+            ch_mult = 1;
+            channels = 128;
+            pad_wd = 0;
+            pad_ht = 0;
+            stride_wd = 2;
+            stride_ht = 2;
+            break;
+        case 9: // (ch_mult 1, (channels % 16) = 0), filter (3,3), pad (0,0)  stride (2,2)
+            input_wd = 6;
+            input_ht = 6;
+            filter_ht = 3;
+            filter_wd = 3;
+            ch_mult = 1;
+            channels = 16;
+            pad_wd = 0;
+            pad_ht = 0;
+            stride_wd = 2;
+            stride_ht = 2;
+            break;
         default:
-            input_wd = 4;
-            input_ht = 4;
+            input_wd = 6;
+            input_ht = 6;
             filter_ht = 3;
             filter_wd = 3;
-            ch_mult = 4;
-            channels = 4;
-            pad_wd = 1;
-            pad_ht = 1;
-            stride_wd = 1;
-            stride_ht = 1;
+            ch_mult = 1;
+            channels = 16;
+            stride_wd = rand() % 2 + 1;
+            stride_ht = stride_wd;
+            pad_wd = stride_wd == 1 ? 0 : rand() % 2;
+            pad_ht = pad_wd;
+            printf("stride(%d), pad (%d)\t", stride_wd, pad_wd);
             break;
         }
 
         uint16_t out_wd = (input_wd - filter_wd + 1) / stride_wd;
         uint16_t out_ht = (input_ht - filter_ht + 1) / stride_ht;
+        if (itr == 9) {
+            // expect the function to handle this gracefully
+            out_wd += 1;
+            out_ht += 1;
+        }
         int in_size = input_wd * input_ht * channels;
         int out_size = out_wd * out_ht * channels * ch_mult;
         int filter_size = filter_wd * filter_ht * channels * ch_mult + 4;
@@ -210,9 +241,16 @@ void esp_nn_depthwise_conv_s8_test()
             out_mult[i] = 0x7eb0e200 + rand() % 50;
         }
 
-        int scratch_buf_size = esp_nn_get_depthwise_conv_scratch_size(input_wd, input_ht,
-                                                                    channels, ch_mult,
-                                                                    filter_wd, filter_ht);
+        data_dims_t input_dims = {.width = input_wd, .height = input_ht, .channels = channels, 1};
+        data_dims_t output_dims = {.width = out_wd, .height = out_ht, .channels = channels * ch_mult, 1};
+        data_dims_t filter_dims = {.width = filter_wd, .height = filter_ht, 0, 0};
+        dw_conv_params_t conv_params = {.in_offset = input_offset, .out_offset = out_offset, .ch_mult = ch_mult,
+                                        .stride = {stride_wd, stride_ht}, .padding = {pad_wd, pad_ht},
+                                        .dilation = {0, 0}, .activation = {activation_min, activation_max}};
+        quant_data_t quant_data = {.shift = out_shift, .mult = out_mult};
+
+        int scratch_buf_size = esp_nn_get_depthwise_conv_scratch_size(&input_dims, &filter_dims,
+                                                                      &output_dims, &conv_params);
         if (scratch_buf_size > 0) {
 #if IDF_HEAP_CAPS
             scratch_buf = heap_caps_malloc(scratch_buf_size + 32, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);
@@ -234,11 +272,8 @@ void esp_nn_depthwise_conv_s8_test()
         }
 
         /* C function */
-        esp_nn_depthwise_conv_s8_ansi(input, input_wd, input_ht, channels, input_offset,
-                                    pad_wd, pad_ht, stride_wd, stride_ht, ch_mult,
-                                    filter_data + 4, filter_wd, filter_ht,
-                                    bias + 1, out_data_c, out_wd, out_ht, out_offset, out_shift,
-                                    out_mult, activation_min, activation_max);
+        esp_nn_depthwise_conv_s8_ansi(&input_dims, input, &filter_dims, filter_data + 4,
+                                      bias + 1, &output_dims, out_data_c, &conv_params, &quant_data);
 
         if (itr == 0) {
             profile_c_end();
@@ -246,11 +281,8 @@ void esp_nn_depthwise_conv_s8_test()
         }
 
         /* Optimized function */
-        esp_nn_depthwise_conv_s8(input, input_wd, input_ht, channels, input_offset,
-                                pad_wd, pad_ht, stride_wd, stride_ht, ch_mult,
-                                filter_data + 4, filter_wd, filter_ht,
-                                bias + 1, out_data_opt, out_wd, out_ht, out_offset, out_shift,
-                                out_mult, activation_min, activation_max);
+        esp_nn_depthwise_conv_s8(&input_dims, input, &filter_dims, filter_data + 4,
+                                 bias + 1, &output_dims, out_data_opt, &conv_params, &quant_data);
 
         if (itr == 0) {
             /* disable profiler */
@@ -479,8 +511,16 @@ void esp_nn_conv_s8_test()
             out_mult[i] = 0x7f67f4f8 + rand() % 50;
         }
 
-        int scratch_buf_size = esp_nn_get_conv_scratch_size(in_wd, in_ht, in_channels,
-                                                            out_channels, filter_wd, filter_ht);
+        data_dims_t input_dims = {.width = in_wd, .height = in_ht, .channels = in_channels, 1};
+        data_dims_t output_dims = {.width = out_wd, .height = out_ht, .channels = out_channels, 1};
+        data_dims_t filter_dims = {.width = filter_wd, .height = filter_ht, 0, 0};
+        conv_params_t conv_params = {.in_offset = input_offset, .out_offset = out_offset,
+                                    .stride = {stride_wd, stride_ht}, .padding = {pad_wd, pad_ht},
+                                    .dilation = {0, 0}, .activation = {activation_min, activation_max}};
+        quant_data_t quant_data = {.shift = out_shift, .mult = out_mult};
+
+        int scratch_buf_size = esp_nn_get_conv_scratch_size(&input_dims, &filter_dims,
+                                                            &output_dims, &conv_params);
         if (scratch_buf_size > 0) {
 #if IDF_HEAP_CAPS
             void *scratch_buf = heap_caps_malloc(scratch_buf_size + 32, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);
@@ -502,11 +542,8 @@ void esp_nn_conv_s8_test()
         }
 
         /* C function */
-        esp_nn_conv_s8_ansi(input, in_wd, in_ht, in_channels, input_offset,
-                            pad_wd, pad_ht, stride_wd, stride_ht,
-                            filter_data + 2, filter_wd, filter_ht, bias,
-                            out_data_c, out_wd, out_ht, out_channels, out_offset, out_shift,
-                            out_mult, activation_min, activation_max);
+        esp_nn_conv_s8_ansi(&input_dims, input, &filter_dims, filter_data + 2,
+                            bias, &output_dims, out_data_c, &conv_params, &quant_data);
 
         if (itr == 0) {
             profile_c_end();
@@ -514,11 +551,8 @@ void esp_nn_conv_s8_test()
         }
 
         /* Optimized function */
-        esp_nn_conv_s8(input, in_wd, in_ht, in_channels, input_offset,
-                    pad_wd, pad_ht, stride_wd, stride_ht,
-                    filter_data + 2, filter_wd, filter_ht, bias,
-                    out_data_opt, out_wd, out_ht, out_channels, out_offset, out_shift,
-                    out_mult, activation_min, activation_max);
+        esp_nn_conv_s8(&input_dims, input, &filter_dims, filter_data + 2,
+                       bias, &output_dims, out_data_opt, &conv_params, &quant_data);
 
         if (itr == 0) {
             /* disable profiler */
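
Note: the tests allocate scratch_buf_size + 32 because the S3 kernels re-align pointers inside the scratch area to the next 16-byte boundary (see the "scratch_offset + 16 - (scratch_offset & 15)" lines in esp_nn_conv_s8_esp32s3 above). A portable sketch of that idiom; the kernels themselves cast through int on the 32-bit target:

#include <stdint.h>

static void *align16(void *p)
{
    uintptr_t addr = (uintptr_t) p;
    return (void *) (addr + 16 - (addr & 15));   /* always advances 1..16 bytes */
}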

BIN
code/components/esp-nn_20220724.zip


BIN
code/components/esp-nn_20220716.zip → code/components/esp-nn_20220827.zip


BIN
code/components/esp32-camera-master.zip


BIN
code/components/esp32-camera-master_20220724.zip


+ 7 - 1
code/components/jomjol_controlcamera/ClassControllCamera.cpp

@@ -263,6 +263,9 @@ void CCamera::EnableAutoExposure(int flashdauer)
         ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed");
         LEDOnOff(false);
         LightOnOff(false);
+        LogFile.SwitchOnOff(true);
+        LogFile.WriteToFile("Camera Capture Failed (Procedure 'EnableAutoExposure') --> Reboot"
+                "Check that your camera module is working and connected properly.");
         doReboot();
     }
     esp_camera_fb_return(fb);        
@@ -313,7 +316,7 @@ esp_err_t CCamera::CaptureToBasisImage(CImageBasis *_Image, int delay)
         LightOnOff(false);
 
         LogFile.SwitchOnOff(true);
-        LogFile.WriteToFile("Camera is not working anymore - most propably hardware problem (instablility, ...). "
+        LogFile.WriteToFile("Camera is not working anymore (CCamera::CaptureToBasisImage) - most propably hardware problem (instablility, ...). "
                 "System will reboot.");
         doReboot();
 
@@ -410,6 +413,9 @@ esp_err_t CCamera::CaptureToFile(std::string nm, int delay)
         ESP_LOGE(TAGCAMERACLASS, "CaptureToFile: Camera Capture Failed");
         LEDOnOff(false);
         LightOnOff(false);
+        LogFile.SwitchOnOff(true);
+        LogFile.WriteToFile("Camera Capture Failed (CCamera::CaptureToFile) --> Reboot"
+                "Check that your camera module is working and connected properly.");
         doReboot();
 
         return ESP_FAIL;

+ 95 - 21
code/components/jomjol_fileserver_ota/server_file.cpp

@@ -120,16 +120,6 @@ esp_err_t get_tflite_file_handler(httpd_req_t *req)
 }
 
 
-/* Handler to redirect incoming GET request for /index.html to /
- * This can be overridden by uploading file with same name */
-// static esp_err_t index_html_get_handler(httpd_req_t *req)
-// {
-//     httpd_resp_set_status(req, "307 Temporary Redirect");
-//     httpd_resp_set_hdr(req, "Location", "/");
-//     httpd_resp_send(req, NULL, 0);  // Response body can be empty
-//     return ESP_OK;
-// }
-
 /* Send HTTP response with a run-time generated html consisting of
  * a list of all files and folders under the requested path.
  * In case of SPIFFS this returns empty list when path is any
@@ -716,6 +706,101 @@ void delete_all_in_directory(std::string _directory)
     closedir(dir);
 }
 
+std::string unzip_new(std::string _in_zip_file, std::string _target_zip, std::string _target_bin, std::string _main)
+{
+    int i, sort_iter;
+    mz_bool status;
+    size_t uncomp_size;
+    mz_zip_archive zip_archive;
+    void* p;
+    char archive_filename[64];
+    std::string zw, ret = "";
+//    static const char* s_Test_archive_filename = "testhtml.zip";
+
+    printf("miniz.c version: %s\n", MZ_VERSION);
+    printf("Zipfile: %s\n", _in_zip_file.c_str());
+    printf("Target Dir ZIP: %s\n", _target_zip.c_str());
+    printf("Target Dir BIN: %s\n", _target_bin.c_str());
+
+    // Now try to open the archive.
+    memset(&zip_archive, 0, sizeof(zip_archive));
+    status = mz_zip_reader_init_file(&zip_archive, _in_zip_file.c_str(), 0);
+    if (!status)
+    {
+        printf("mz_zip_reader_init_file() failed!\n");
+        return ret;
+    }
+
+    // Get and print information about each file in the archive.
+    int numberoffiles = (int)mz_zip_reader_get_num_files(&zip_archive);
+    for (sort_iter = 0; sort_iter < 2; sort_iter++)
+    {
+        memset(&zip_archive, 0, sizeof(zip_archive));
+        status = mz_zip_reader_init_file(&zip_archive, _in_zip_file.c_str(), sort_iter ? MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY : 0);
+        if (!status)
+        {
+            printf("mz_zip_reader_init_file() failed!\n");
+            return ret;
+        }
+
+        for (i = 0; i < numberoffiles; i++)
+        {
+            mz_zip_archive_file_stat file_stat;
+            mz_zip_reader_file_stat(&zip_archive, i, &file_stat);
+            snprintf(archive_filename, sizeof(archive_filename), "%s", file_stat.m_filename);
+ 
+            // Try to extract all the files to the heap.
+            p = mz_zip_reader_extract_file_to_heap(&zip_archive, archive_filename, &uncomp_size, 0);
+            if (!p)
+            {
+                printf("mz_zip_reader_extract_file_to_heap() failed!\n");
+                mz_zip_reader_end(&zip_archive);
+                return ret;
+            }
+
+            // Save to File.
+            zw = std::string(archive_filename);
+            if (toUpper(zw) == "FIRMWARE.BIN")
+            {
+                zw = _target_bin + zw;
+                ret = zw;
+            }
+            else
+            {
+                std::string _dir = getDirectory(zw);
+
+                if (_dir.length() > 0)
+                {
+                    zw = _main + zw;
+                }
+                else
+                {
+                    zw = _target_zip + zw;
+                }
+
+            }
+
+            printf("Filename to extract: %s", zw.c_str());
+            DeleteFile(zw);
+            FILE* fpTargetFile = OpenFileAndWait(zw.c_str(), "wb");
+            fwrite(p, 1, (uint)uncomp_size, fpTargetFile);
+            fclose(fpTargetFile);
+
+            printf("Successfully extracted file \"%s\", size %u\n", archive_filename, (uint)uncomp_size);
+            //            printf("File data: \"%s\"\n", (const char*)p);
+
+            // We're done.
+            mz_free(p);
+        }
+
+        // Close the archive, freeing any resources it was using
+        mz_zip_reader_end(&zip_archive);
+    }
+
+    printf("Success.\n");
+    return ret;
+}
+
 void unzip(std::string _in_zip_file, std::string _target_directory){
     int i, sort_iter;
     mz_bool status;
@@ -860,15 +945,4 @@ void register_server_file_uri(httpd_handle_t server, const char *base_path)
     };
     httpd_register_uri_handler(server, &file_delete);
 
-
-    /* URI handler for getting tflite files from server */
-/*
-    httpd_uri_t file_tflite = {
-        .uri       = "/tflite",   // Match all URIs of type /delete/path/to/file
-        .method    = HTTP_GET,
-        .handler   = get_tflite_file_handler,
-        .user_ctx  = server_data    // Pass server data as context
-    };
-    httpd_register_uri_handler(server, &file_tflite);
-*/
 }
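For illustration, a minimal sketch of how the new unzip_new() is meant to be called (the target directories match the ones used in server_ota.cpp; the upload path is hypothetical). An entry named FIRMWARE.BIN goes to the firmware directory and its extracted path is returned, entries with a directory part land below _main (default /sdcard/), and everything else goes to the HTML target:

    // Sketch only - not part of this commit; "upload.zip" is a placeholder.
    std::string firmware = unzip_new("/sdcard/firmware/upload.zip",   // archive to extract
                                     "/sdcard/html/",                 // plain entries (web UI)
                                     "/sdcard/firmware/");            // firmware.bin, if present
    if (!firmware.empty())
    {
        // the archive contained a FIRMWARE.BIN; it can be handed to the OTA routine next
    }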

+ 2 - 0
code/components/jomjol_fileserver_ota/server_file.h

@@ -4,6 +4,8 @@
 void register_server_file_uri(httpd_handle_t server, const char *base_path);
 
 void unzip(std::string _in_zip_file, std::string _target_directory);
+std::string unzip_new(std::string _in_zip_file, std::string _target_zip, std::string _target_bin, std::string _main = "/sdcard/");
+
 
 void delete_all_in_directory(std::string _directory);
 

+ 3 - 0
code/components/jomjol_fileserver_ota/server_help.cpp

@@ -43,6 +43,7 @@ esp_err_t send_file(httpd_req_t *req, std::string filename)
     }
 
     ESP_LOGI(TAG, "Sending file : %s ...", filename.c_str());
+//    httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
     set_content_type_from_file(req, filename.c_str());
 
     /* Retrieve the pointer to scratch buffer for temporary storage */
@@ -120,6 +121,8 @@ esp_err_t set_content_type_from_file(httpd_req_t *req, const char *filename)
         return httpd_resp_set_type(req, "image/x-icon");
     } else if (IS_FILE_EXT(filename, ".js")) {
         return httpd_resp_set_type(req, "text/javascript");
+    } else if (IS_FILE_EXT(filename, ".css")) {
+        return httpd_resp_set_type(req, "text/css");
     }
     /* This is a limited set only */
     /* For any other type always set as plain text */

+ 99 - 22
code/components/jomjol_fileserver_ota/server_ota.cpp

@@ -50,6 +50,8 @@ static char ota_write_data[BUFFSIZE + 1] = { 0 };
 #define OTA_URL_SIZE 256
 static const char *TAGPARTOTA = "server_ota";
 
+esp_err_t handler_reboot(httpd_req_t *req);
+
 
 static void infinite_loop(void)
 {
@@ -207,24 +209,6 @@ static void print_sha256 (const uint8_t *image_hash, const char *label)
 
 static bool diagnostic(void)
 {
-/*
-    gpio_config_t io_conf;
-    io_conf.intr_type    = (gpio_int_type_t) GPIO_PIN_INTR_DISABLE;
-    io_conf.mode         = GPIO_MODE_INPUT;
-    io_conf.pin_bit_mask = (1ULL << CONFIG_EXAMPLE_GPIO_DIAGNOSTIC);
-    io_conf.pull_down_en = GPIO_PULLDOWN_DISABLE;
-    io_conf.pull_up_en   = GPIO_PULLUP_ENABLE;
-    gpio_config(&io_conf);
-
-    ESP_LOGI(TAGPARTOTA, "Diagnostics (5 sec)...");
-    vTaskDelay(5000 / portTICK_PERIOD_MS);
-
-    bool diagnostic_is_ok = gpio_get_level(CONFIG_EXAMPLE_GPIO_DIAGNOSTIC);
-
-    gpio_reset_pin(CONFIG_EXAMPLE_GPIO_DIAGNOSTIC);
-
-    return diagnostic_is_ok;
-*/
     return true;
 }
 
@@ -326,7 +310,7 @@ esp_err_t handler_ota_update(httpd_req_t *req)
         
         if (httpd_query_key_value(_query, "task", _valuechar, 30) == ESP_OK)
         {
-            printf("task is found"); printf(_valuechar); printf("\n"); 
+            printf("task is found: "); printf(_valuechar); printf("\n"); 
             _task = std::string(_valuechar);
         }
 
@@ -344,16 +328,105 @@ esp_err_t handler_ota_update(httpd_req_t *req)
 
     };
 
+    if (_task.compare("update") == 0)
+    {
+        std::string filetype = toUpper(getFileType(fn));
+        if (filetype.length() == 0)
+        {
+            std::string zw = "Update failed - no file specified (zip, bin, tfl, tlite)";
+            httpd_resp_sendstr_chunk(req, zw.c_str());
+            httpd_resp_sendstr_chunk(req, NULL);  
+            return ESP_OK;        
+        }
+
+
+        if ((filetype == "TFLITE") || (filetype == "TFL"))
+        {
+            std::string out = "/sdcard/config/" + getFileFullFileName(fn);
+            DeleteFile(out);
+            CopyFile(fn, out);
+            DeleteFile(fn);
+
+            const char*  resp_str = "Neural Network File copied.";
+            httpd_resp_sendstr_chunk(req, resp_str);
+            httpd_resp_sendstr_chunk(req, NULL);  
+            return ESP_OK;
+        }
+
+
+        if (filetype == "ZIP")
+        {
+            std::string in, out, outbin, zw, retfirmware;
+
+//            in = "/sdcard/firmware/html.zip";
+            out = "/sdcard/html";
+            outbin = "/sdcard/firmware";
+
+//            delete_all_in_directory(out);
+
+            retfirmware = unzip_new(fn, out+"/", outbin+"/");
+
+            if (retfirmware.length() > 0)
+            {
+                filetype = "BIN";
+                fn = retfirmware;
+                zw = "HTML Update Successfull!<br><br>Additioal firmware found in ZIP file.\n";
+                httpd_resp_sendstr_chunk(req, zw.c_str());
+            }
+            else
+            {
+                zw = "HTML Update Successfull!<br><br>No reboot necessary.\n";
+                httpd_resp_sendstr_chunk(req, zw.c_str());
+                httpd_resp_sendstr_chunk(req, NULL);  
+                return ESP_OK;        
+            }
+        }
+
+
+        if (filetype == "BIN")
+        {
+            const char* resp_str;    
+            KillTFliteTasks();
+            gpio_handler_deinit();
+            if (ota_update_task(fn))
+            {
+//                resp_str = "rebooting - Firmware Update Successfull!<br><br>You can restart now.";
+//                httpd_resp_send(req, resp_str, strlen(resp_str));  
+//                httpd_resp_sendstr_chunk(req, NULL);  
+                return handler_reboot(req);                
+            }
+            else
+            {
+                resp_str = "Error during Firmware Update!!!<br><br>Please check output of console.";
+            }
+
+            httpd_resp_send(req, resp_str, strlen(resp_str));  
+
+            #ifdef DEBUG_DETAIL_ON 
+                LogFile.WriteHeapInfo("handler_ota_update - Done");    
+            #endif
+
+            return ESP_OK;
+        }
+
+
+        std::string zw = "Update failed - no valid file specified (zip, bin, tfl, tlite)";
+        httpd_resp_sendstr_chunk(req, zw.c_str());
+        httpd_resp_sendstr_chunk(req, NULL);  
+        return ESP_OK;        
+    }
+
+
     if (_task.compare("unziphtml") == 0)
     {
         std::string in, out, zw;
 
         in = "/sdcard/firmware/html.zip";
-        out = "/sdcard/html/";
+        out = "/sdcard/html";
 
         delete_all_in_directory(out);
 
-        unzip(in, out);
+        unzip(in, out+"/");
         zw = "HTML Update Successfull!<br><br>No reboot necessary";
         httpd_resp_sendstr_chunk(req, zw.c_str());
         httpd_resp_sendstr_chunk(req, NULL);  
@@ -371,6 +444,8 @@ esp_err_t handler_ota_update(httpd_req_t *req)
             unlink(fn.c_str());
         }
         /* Respond with an empty chunk to signal HTTP response completion */
+        std::string zw = "file deleted!\n";
+        httpd_resp_sendstr_chunk(req, zw.c_str());
         httpd_resp_send_chunk(req, NULL, 0);
         return ESP_OK;
     }
@@ -416,6 +491,8 @@ void task_reboot(void *pvParameter)
 }
 
 void doReboot(){
+    LogFile.SwitchOnOff(true);
+    LogFile.WriteToFile("Reboot triggert by Software (5s).");
     ESP_LOGI(TAGPARTOTA, "Reboot in 5sec");
     LogFile.WriteToFile("Reboot in 5sec");
     xTaskCreate(&task_reboot, "reboot", configMINIMAL_STACK_SIZE * 64, NULL, 10, NULL);
@@ -435,7 +512,7 @@ esp_err_t handler_reboot(httpd_req_t *req)
 
     LogFile.WriteToFile("handler_reboot");
     ESP_LOGI(TAGPARTOTA, "!!! System will restart within 5 sec!!!");
-    const char* resp_str = "!!! System will restart within 5 sec!!!";
+    const char* resp_str = "<body style='font-family: arial'> <h3 id=t></h3></body><script>var h='Rebooting!<br>The page will automatically reload after around 25s.<br>'; document.getElementById('t').innerHTML=h; setInterval(function (){h +='.'; document.getElementById('t').innerHTML=h; fetch(window.location.hostname,{mode: 'no-cors'}).then(r=>{parent.location.href=('/index.html');})}, 1000);</script>";
     httpd_resp_send(req, resp_str, strlen(resp_str)); 
     
     doReboot();
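The new "update" task above branches purely on the upper-cased file extension. Reduced to its decision logic as a hypothetical stand-alone helper (illustration only, built on getFileType() and toUpper() from Helper.cpp):

    #include <string>
    #include "Helper.h"

    // Illustrative helper, not committed code: what handler_ota_update() does with an uploaded file.
    static std::string classifyUpdateFile(const std::string &fn)
    {
        std::string ext = toUpper(getFileType(fn));
        if (ext == "TFLITE" || ext == "TFL") return "model";      // copied to /sdcard/config/
        if (ext == "ZIP")                    return "web";        // extracted via unzip_new()
        if (ext == "BIN")                    return "firmware";   // flashed via ota_update_task(), then reboot
        return "unknown";                                         // rejected with an error message
    }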

+ 186 - 118
code/components/jomjol_flowcontroll/ClassFlowCNNGeneral.cpp

@@ -10,7 +10,7 @@
 
 static const char* TAG = "flow_analog";
 
-bool debugdetailgeneral = false;
+bool debugdetailgeneral = true;
 
 ClassFlowCNNGeneral::ClassFlowCNNGeneral(ClassFlowAlignment *_flowalign, t_CNNType _cnntype) : ClassFlowImage(NULL, TAG)
 {
@@ -28,7 +28,7 @@ ClassFlowCNNGeneral::ClassFlowCNNGeneral(ClassFlowAlignment *_flowalign, t_CNNTy
     flowpostalignment = _flowalign;
 }
 
-string ClassFlowCNNGeneral::getReadout(int _analog = 0, bool _extendedResolution, int prev)
+string ClassFlowCNNGeneral::getReadout(int _analog = 0, bool _extendedResolution, int prev, float _vorgaengerAnalog)
 {
     string result = "";    
 
@@ -41,8 +41,8 @@ string ClassFlowCNNGeneral::getReadout(int _analog = 0, bool _extendedResolution
         float zahl = GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result_float;
         int ergebnis_nachkomma = ((int) floor(zahl * 10) + 10) % 10;
         
-        prev = ZeigerEval(GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result_float, prev);
-        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::getReadout(analog) zahl=" + std::to_string(zahl) + ", ergebnis_nachkomma=" + std::to_string(ergebnis_nachkomma) + ", prev=" + std::to_string(prev));
+        prev = ZeigerEvalAnalogNeu(GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result_float, prev);
+//        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::getReadout(analog) zahl=" + std::to_string(zahl) + ", ergebnis_nachkomma=" + std::to_string(ergebnis_nachkomma) + ", prev=" + std::to_string(prev));
         result = std::to_string(prev);
 
         if (_extendedResolution && (CNNType != Digital))
@@ -50,7 +50,7 @@ string ClassFlowCNNGeneral::getReadout(int _analog = 0, bool _extendedResolution
 
         for (int i = GENERAL[_analog]->ROI.size() - 2; i >= 0; --i)
         {
-            prev = ZeigerEval(GENERAL[_analog]->ROI[i]->result_float, prev);
+            prev = ZeigerEvalAnalogNeu(GENERAL[_analog]->ROI[i]->result_float, prev);
             result = std::to_string(prev) + result;
         }
         return result;
@@ -82,13 +82,14 @@ string ClassFlowCNNGeneral::getReadout(int _analog = 0, bool _extendedResolution
                 result = std::to_string(ergebnis_vorkomma) + std::to_string(ergebnis_nachkomma);
                 prev = ergebnis_vorkomma;
                 if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::getReadout(dig100-ext) ergebnis_vorkomma=" + std::to_string(ergebnis_vorkomma) + ", ergebnis_nachkomma=" + std::to_string(ergebnis_nachkomma) + ", prev=" + std::to_string(prev));
-        
-
             }
             else
             {
 //                prev = ZeigerEval(GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result_float, prev);
-                prev = ZeigerEvalHybrid(GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result_float, prev, prev);
+                if (_vorgaengerAnalog >= 0)
+                    prev = ZeigerEvalHybridNeu(GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result_float, _vorgaengerAnalog, prev, true);
+                else
+                    prev = ZeigerEvalHybridNeu(GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result_float, prev, prev);
                 result = std::to_string(prev);
                 if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::getReadout(dig100)  prev=" + std::to_string(prev));
         
@@ -105,8 +106,8 @@ string ClassFlowCNNGeneral::getReadout(int _analog = 0, bool _extendedResolution
         {
             if (GENERAL[_analog]->ROI[i]->result_float >= 0)
             {
-                prev = ZeigerEvalHybrid(GENERAL[_analog]->ROI[i]->result_float, GENERAL[_analog]->ROI[i+1]->result_float, prev);
-                if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::getReadout#ZeigerEvalHybrid()= " + std::to_string(prev));
+                prev = ZeigerEvalHybridNeu(GENERAL[_analog]->ROI[i]->result_float, GENERAL[_analog]->ROI[i+1]->result_float, prev);
+                if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::getReadout#ZeigerEvalHybridNeu()= " + std::to_string(prev));
                 result = std::to_string(prev) + result;
                 if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::getReadout#result= " + result);
                 
@@ -122,55 +123,11 @@ string ClassFlowCNNGeneral::getReadout(int _analog = 0, bool _extendedResolution
         return result;
     }
 
-/*
-    if (CNNType == Digital100)
-    {
-        int zif_akt = -1;
-
-        float zahl = GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result_float;
-        if (zahl >= 0)       // NaN?
-        {
-            if (_extendedResolution)
-            {
-                int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;
-                int ergebnis_vorkomma = ((int) floor(zahl)) % 10;
-
-                result = std::to_string(ergebnis_vorkomma) + std::to_string(ergebnis_nachkomma);
-                zif_akt = ergebnis_vorkomma;
-            }
-            else
-            {
-                zif_akt = ZeigerEvalHybrid(GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result_float, -1, -1);
-                result = std::to_string(zif_akt);
-            }
-        }
-        else
-        {
-            result = "N";
-            if (_extendedResolution && (CNNType != Digital))
-                result = "NN";
-        }
-
-        for (int i = GENERAL[_analog]->ROI.size() - 2; i >= 0; --i)
-        {
-            if (GENERAL[_analog]->ROI[i]->result_float >= 0)
-            {
-                zif_akt = ZeigerEvalHybrid(GENERAL[_analog]->ROI[i]->result_float, GENERAL[_analog]->ROI[i+1]->result_float, zif_akt);
-                result = std::to_string(zif_akt) + result;
-            }
-            else
-            {
-                zif_akt = -1;
-                result = "N" + result;
-            }
-        }
-        return result;
-    }
-*/
 
     return result;
 }
 
+/*
 int ClassFlowCNNGeneral::ZeigerEvalHybrid(float zahl, float zahl_vorgaenger, int eval_vorgaenger)
 {
     if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalHybrid( " + std::to_string(zahl) + ", " + std::to_string(zahl_vorgaenger) + ", " + std::to_string(eval_vorgaenger) + ")");
@@ -189,7 +146,7 @@ int ClassFlowCNNGeneral::ZeigerEvalHybrid(float zahl, float zahl_vorgaenger, int
 
     // 9.0, da bei getReadout() prev als int übergeben wird (9 statt 9.5)
     // tritt bei der ersten ziffer von digit auf, wenn analog davor (2. Aufruf von getReadout)
-    if ((zahl_vorgaenger >= 0.5 ) && (zahl_vorgaenger < 9.0))
+    if ((zahl_vorgaenger >= 0.5 ) && (zahl_vorgaenger < 9.5))
     {
         // kein Ziffernwechsel, da Vorkomma weit genug weg ist (0+/-0.5) --> zahl wird gerundet
         if ((ergebnis_nachkomma <= 2) || (ergebnis_nachkomma >= 8))     // Band um die Ziffer --> Runden, da Ziffer im Rahmen Ungenauigkeit erreicht
@@ -220,69 +177,170 @@ int ClassFlowCNNGeneral::ZeigerEvalHybrid(float zahl, float zahl_vorgaenger, int
                         + ", zahl_vorgaenger=" + std::to_string(zahl_vorgaenger) + ", eval_vorgaenger=" + std::to_string(eval_vorgaenger));
     return -1;
 
-/*
-    if (zahl_vorgaenger > 9.2)              // Ziffernwechsel beginnt
+}
+*/
+
+int ClassFlowCNNGeneral::ZeigerEvalHybridNeu(float zahl, float zahl_vorgaenger, int eval_vorgaenger, bool AnalogerVorgaenger)
+{
+    int result;
+    int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;
+    int ergebnis_vorkomma = ((int) floor(zahl) + 10) % 10;
+
+    if (eval_vorgaenger < 0)
     {
-        if (eval_vorgaenger == 0)           // Wechsel hat schon stattgefunden
-        {
-            return ((int) round(zahl) + 10) % 10;      // Annahme, dass die neue Zahl schon in der Nähe des Ziels ist
-        }
+        if ((ergebnis_nachkomma <= DigitalUnschaerfe * 10) || (ergebnis_nachkomma >= DigitalUnschaerfe * 10))     // Band um die Ziffer --> Runden, da Ziffer im Rahmen Ungenauigkeit erreicht
+            result = (int) (round(zahl) + 10) % 10;
         else
-        {
-            if (zahl_vorgaenger <= 9.5)     // Wechsel startet gerade, aber beginnt erst
-            {
-                if ((ergebnis_nachkomma <= 2) || (ergebnis_nachkomma >= 8))     // Band um die Ziffer --> Runden, da Ziffer im Rahmen Ungenauigkeit erreicht
-                    return ((int) round(zahl) + 10) % 10;
-                else
-                    return ((int) trunc(zahl) + 10) % 10;
-            }
-            else
-            {
-                return ((int) trunc(zahl) + 10) % 10;   // Wechsel schon weiter fortgeschritten, d.h. über 2 als Nachkomma
-            }
-        }
+            result = (int) ((int) trunc(zahl) + 10) % 10;
+
+        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalHybridNeu - kein Vorgänger - Ergebnis = " + std::to_string(result) +
+                                                    " zahl: " + std::to_string(zahl) + " zahl_vorgaenger = " + std::to_string(zahl_vorgaenger)+ " eval_vorgaenger = " + std::to_string(eval_vorgaenger) + " DigitalUnschaerfe = " +  std::to_string(DigitalUnschaerfe));
+        return result;
     }
 
-    if ((ergebnis_nachkomma <= 2) || (ergebnis_nachkomma >= 8))     // Band um die Ziffer --> Runden, da Ziffer im Rahmen Ungenauigkeit erreicht
-        return ((int) round(zahl) + 10) % 10;
+    if (AnalogerVorgaenger)
+    {
+//        result = ZeigerEvalAnalogToDigitNeu(zahl, eval_vorgaenger);
+        result = ZeigerEvalAnalogToDigitNeu(zahl, zahl_vorgaenger, eval_vorgaenger);
+        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalHybridNeu - Analoger Vorgänger, Bewertung über ZeigerEvalAnalogNeu = " + std::to_string(result) +
+                                                    " zahl: " + std::to_string(zahl) + " zahl_vorgaenger = " + std::to_string(zahl_vorgaenger)+ " eval_vorgaenger = " + std::to_string(eval_vorgaenger) + " DigitalUnschaerfe = " +  std::to_string(DigitalUnschaerfe));
+        return result;
+    }
 
-    return ((int) trunc(zahl) + 10) % 10;
-*/
-}
+    if ((zahl_vorgaenger >= DigitalUebergangsbereichVorgaenger ) && (zahl_vorgaenger <= (10.0 - DigitalUebergangsbereichVorgaenger)))
+    {
+        // kein Ziffernwechsel, da Vorgänger weit genug weg ist (0+/-DigitalUebergangsbereichVorgaenger) --> zahl wird gerundet
+        if ((ergebnis_nachkomma <= DigitalBand) || (ergebnis_nachkomma >= (10-DigitalBand)))     // Band um die Ziffer --> Runden, da Ziffer im Rahmen Ungenauigkeit erreicht
+            result = ((int) round(zahl) + 10) % 10;
+        else
+            result = ((int) trunc(zahl) + 10) % 10;
 
+        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalHybridNeu - KEIN Analoger Vorgänger, kein Ziffernwechsel, da Vorkomma weit genug weg = " + std::to_string(result) +
+                                                    " zahl: " + std::to_string(zahl) + " zahl_vorgaenger = " + std::to_string(zahl_vorgaenger)+ " eval_vorgaenger = " + std::to_string(eval_vorgaenger) + " DigitalUnschaerfe = " +  std::to_string(DigitalUnschaerfe));
+        return result;
+    }  
 
+    if (eval_vorgaenger <= 1)  // Nulldurchgang hat stattgefunden (!Bewertung über Prev_value und nicht Zahl!) --> hier aufrunden (2.8 --> 3, aber auch 3.1 --> 3)
+    {
+        if (ergebnis_nachkomma > 5)
+            result =  (ergebnis_vorkomma + 1) % 10;
+        else
+            result =  ergebnis_vorkomma;
+        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalHybridNeu - KEIN Analoger Vorgänger, Nulldurchgang hat stattgefunden = " + std::to_string(result) +
+                                                    " zahl: " + std::to_string(zahl) + " zahl_vorgaenger = " + std::to_string(zahl_vorgaenger)+ " eval_vorgaenger = " + std::to_string(eval_vorgaenger) + " DigitalUnschaerfe = " +  std::to_string(DigitalUnschaerfe));
+        return result;
+    }
+
+    // bleibt nur >= 9.5 --> noch kein Nulldurchgang --> 2.8 --> 2, und 3.1 --> 2
+    // alles >=x.4 kann als aktuelle Zahl gelten im Übergang. Bei 9.5 Vorgänger kann die aktuelle
+    // Zahl noch x.6 - x.7 sein. 
+    if (ergebnis_nachkomma >= 4)
+        result =  ergebnis_vorkomma;
+    else
+        result =  (ergebnis_vorkomma - 1 + 10) % 10;
 
-int ClassFlowCNNGeneral::ZeigerEval(float zahl, int ziffer_vorgaenger)
-{   
-    int ergebnis_nachkomma = ((int) floor(zahl * 10) + 10) % 10;
+    if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalHybridNeu - KEIN Analoger Vorgänger, >= 9.5 --> noch kein Nulldurchgang = " + std::to_string(result) +
+                                                " zahl: " + std::to_string(zahl) + " zahl_vorgaenger = " + std::to_string(zahl_vorgaenger)+ " eval_vorgaenger = " + std::to_string(eval_vorgaenger) + " DigitalUnschaerfe = " +  std::to_string(DigitalUnschaerfe) + " ergebnis_nachkomma = " + std::to_string(ergebnis_nachkomma));
+    return result;
+}
+
+
+int ClassFlowCNNGeneral::ZeigerEvalAnalogToDigitNeu(float zahl, float ziffer_vorgaenger,  int eval_vorgaenger)
+{
+    int result;
+    int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;
     int ergebnis_vorkomma = ((int) floor(zahl) + 10) % 10;
-    int ergebnis;
-    float ergebnis_rating;
-    if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEval erg_v=" + std::to_string(ergebnis_vorkomma) + ", erg_n=" + std::to_string(ergebnis_nachkomma) + ", ziff_v=" + std::to_string(ziffer_vorgaenger));
 
-    if (ziffer_vorgaenger == -1)
-        return ergebnis_vorkomma % 10;
-
-    // Ist die aktuelle Stelle schon umgesprungen und die Vorstelle noch nicht?
-    // Akt.: 2.1, Vorstelle = 0.9 => 1.9
-    // Problem sind mehrere Rundungen 
-    // Bsp. zahl=4.5, Vorgänger= 9.6 (ziffer_vorgaenger=0)
-    // Tritt nur auf bei Übergang von analog auf digit
-    ergebnis_rating = ergebnis_nachkomma - ziffer_vorgaenger;
-    if (ergebnis_nachkomma >= 5)
-        ergebnis_rating-=5.1;
+    if (ziffer_vorgaenger < 0)
+    {
+        result = (int) floor(zahl);
+        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalAnalogToDigitNeu - kein Vorgänger - Ergebnis = " + std::to_string(result) +
+                                                    " zahl: " + std::to_string(zahl) + " ziffer_vorgaenger = " + std::to_string(ziffer_vorgaenger) + " AnalogFehler = " +  std::to_string(AnalogFehler));
+        return result;
+    }
+
+    if ((ziffer_vorgaenger >= DigitalUebergangsbereichVorgaengerAnalogToDigit ) && (ziffer_vorgaenger <= (10.0 - DigitalUebergangsbereichVorgaengerAnalogToDigit)))
+    {
+        // kein Ziffernwechsel, da Vorgänger weit genug weg ist (0+/-DigitalUebergangsbereichVorgaenger) --> zahl wird gerundet
+        if ((ergebnis_nachkomma <= 2) || (ergebnis_nachkomma >= 8))     // Band um die Ziffer --> Runden, da Ziffer im Rahmen Ungenauigkeit erreicht
+            result = ((int) round(zahl) + 10) % 10;
+        else
+            result = ((int) trunc(zahl) + 10) % 10;
+
+        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalAnalogToDigitNeu - kein Ziffernwechsel, da Vorkomma weit genug weg = " + std::to_string(result) +
+                                                    " zahl: " + std::to_string(zahl) + " ziffer_vorgaenger = " + std::to_string(ziffer_vorgaenger) + " DigitalUnschaerfe = " +  std::to_string(DigitalUnschaerfe));
+        return result;
+    }  
+
+    if (ziffer_vorgaenger <= 1 && eval_vorgaenger<9)  // Nulldurchgang hat stattgefunden (!Bewertung über Prev_value und nicht Zahl!) --> hier aufrunden (2.8 --> 3, aber auch 3.1 --> 3)
+        // aber Sonderfall ziffer_vorgaeger = 0.1 vor_vorgaenger 9.9 => eval_vorgaenger ist 9, damit hat Nulldurchgang nicht stattgefunden.
+    {
+        if (ergebnis_nachkomma > 5)
+            result =  (ergebnis_vorkomma + 1) % 10;
+        else
+            result =  ergebnis_vorkomma;
+        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalAnalogToDigitNeu - Nulldurchgang hat stattgefunden = " + std::to_string(result) +
+                                                    " zahl: " + std::to_string(zahl) + " ziffer_vorgaenger = " + std::to_string(ziffer_vorgaenger) + " DigitalUnschaerfe = " +  std::to_string(DigitalUnschaerfe));
+        return result;
+    }
+
+    // bleibt nur >= 9.5 --> noch kein Nulldurchgang --> 2.8 --> 2, und 3.1 --> 2
+    // hier auf 4 reduziert, da erst ab Vorgänder 9 anfängt umzustellen. Bei 9.5 Vorgänger kann die aktuelle
+    // Zahl noch x.4 - x.5 sein.
+    if (ergebnis_nachkomma >= 4)
+        result =  ergebnis_vorkomma;
     else
-        ergebnis_rating+=5;
-    ergebnis = (int) round(zahl);
-    if (ergebnis_rating < 0)
-        ergebnis-=1;
-    if (ergebnis == -1)
-        ergebnis+=10;
+        result =  (ergebnis_vorkomma - 1 + 10) % 10;
+
+    if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalAnalogToDigitNeu - 9.0 --> noch kein Nulldurchgang = " + std::to_string(result) +
+                                                    " zahl: " + std::to_string(zahl) + " ziffer_vorgaenger = " + std::to_string(ziffer_vorgaenger) + " DigitalUnschaerfe = " +  std::to_string(DigitalUnschaerfe));
+    return result;
+}
+
+int ClassFlowCNNGeneral::ZeigerEvalAnalogNeu(float zahl, int ziffer_vorgaenger)
+{
+    float zahl_min, zahl_max;
+    int result;
+
+    if (ziffer_vorgaenger == -1)
+    {
+        result = (int) floor(zahl);
+        if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalAnalogNeu - kein Vorgänger - Ergebnis = " + std::to_string(result) +
+                                                    " zahl: " + std::to_string(zahl) + " ziffer_vorgaenger = " + std::to_string(ziffer_vorgaenger) + " AnalogFehler = " +  std::to_string(AnalogFehler));
+        return result;
+    }
+
+    zahl_min = zahl - AnalogFehler / 10.0;
+    zahl_max = zahl + AnalogFehler / 10.0;
+
+    if ((int) floor(zahl_max) - (int) floor(zahl_min) != 0)
+    {
+        if (ziffer_vorgaenger <= AnalogFehler)
+        {
+            result = ((int) floor(zahl_max) + 10) % 10;
+            if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalAnalogNeu - Zahl uneindeutig, Korrektur nach oben - Ergebnis = " + std::to_string(result) +
+                                                        " zahl: " + std::to_string(zahl) + " ziffer_vorgaenger = " + std::to_string(ziffer_vorgaenger) + " AnalogFehler = " +  std::to_string(AnalogFehler));
+            return result;
+        }
+        if (ziffer_vorgaenger >= 10 - AnalogFehler)
+        {
+            result = ((int) floor(zahl_min) + 10) % 10;
+            if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalAnalogNeu - Zahl uneindeutig, Korrektur nach unten - Ergebnis = " + std::to_string(result) +
+                                                        " zahl: " + std::to_string(zahl) + " ziffer_vorgaenger = " + std::to_string(ziffer_vorgaenger) + " AnalogFehler = " +  std::to_string(AnalogFehler));
+            return result;
+        }
+    }
     
-    ergebnis = (ergebnis + 10) % 10;
-    return ergebnis;
+
+    result = ((int) floor(zahl) + 10) % 10;
+    if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::ZeigerEvalAnalogNeu - Zahl eindeutig, keine Korrektur notwendig - Ergebnis = " + std::to_string(result) +
+                                                " zahl: " + std::to_string(zahl) + " ziffer_vorgaenger = " + std::to_string(ziffer_vorgaenger) + " AnalogFehler = " +  std::to_string(AnalogFehler));
+
+    return result;
+
 }
 
+
 bool ClassFlowCNNGeneral::ReadParameter(FILE* pfile, string& aktparamgraph)
 {
     std::vector<string> zerlegt;
@@ -327,11 +385,6 @@ bool ClassFlowCNNGeneral::ReadParameter(FILE* pfile, string& aktparamgraph)
         {
             this->logfileRetentionInDays = std::stoi(zerlegt[1]);
         }
-//        if ((toUpper(zerlegt[0]) == "MODELTYPE") && (zerlegt.size() > 1))
-//        {
-//            if (toUpper(zerlegt[1]) == "DIGITHYPRID")
-//                CNNType = DigitalHyprid;
-//        }
 
         if ((toUpper(zerlegt[0]) == "MODEL") && (zerlegt.size() > 1))
         {
@@ -350,6 +403,11 @@ bool ClassFlowCNNGeneral::ReadParameter(FILE* pfile, string& aktparamgraph)
             neuroi->posy = std::stoi(zerlegt[2]);
             neuroi->deltax = std::stoi(zerlegt[3]);
             neuroi->deltay = std::stoi(zerlegt[4]);
+            neuroi->CCW = false;
+            if (zerlegt.size() >= 6)
+            {
+                neuroi->CCW = toUpper(zerlegt[5]) == "TRUE";
+            }
             neuroi->result_float = -1;
             neuroi->image = NULL;
             neuroi->image_org = NULL;
@@ -422,7 +480,7 @@ general* ClassFlowCNNGeneral::GetGENERAL(string _name, bool _create = true)
 
     _ret->ROI.push_back(neuroi);
 
-    printf("GetGENERAL - GENERAL %s - roi %s\n", _analog.c_str(), _roi.c_str());
+    printf("GetGENERAL - GENERAL %s - roi %s - CCW: %d\n", _analog.c_str(), _roi.c_str(), neuroi->CCW);
 
     return _ret;
 }
@@ -569,10 +627,11 @@ bool ClassFlowCNNGeneral::getNetworkParameter()
                 CNNType = Digital;
                 printf("TFlite-Type set to Digital\n");
                 break;
-            case 20:
+/*            case 20:
                 CNNType = DigitalHyprid10;
                 printf("TFlite-Type set to DigitalHyprid10\n");
                 break;
+*/
 //            case 22:
 //                CNNType = DigitalHyprid;
 //                printf("TFlite-Type set to DigitalHyprid\n");
@@ -635,8 +694,13 @@ bool ClassFlowCNNGeneral::doNeuralNetwork(string time)
                         f1 = tflite->GetOutputValue(0);
                         f2 = tflite->GetOutputValue(1);
                         float result = fmod(atan2(f1, f2) / (M_PI * 2) + 2, 1);
-                        GENERAL[_ana]->ROI[i]->result_float = result * 10;
-                        printf("Result General(Analog)%i: %f\n", i, GENERAL[_ana]->ROI[i]->result_float); 
+                              
+                        if(GENERAL[_ana]->ROI[i]->CCW)
+                            GENERAL[_ana]->ROI[i]->result_float = 10 - (result * 10);
+                        else
+                            GENERAL[_ana]->ROI[i]->result_float = result * 10;
+                              
+                        printf("Result General(Analog)%i - CCW: %d -  %f\n", i, GENERAL[_ana]->ROI[i]->CCW, GENERAL[_ana]->ROI[i]->result_float); 
                         if (isLogImage)
                             LogImage(logPath, GENERAL[_ana]->ROI[i]->name, &GENERAL[_ana]->ROI[i]->result_float, NULL, time, GENERAL[_ana]->ROI[i]->image_org);
                     } break;
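For reference, the counter-clockwise (CCW) handling introduced above simply mirrors the pointer reading on the 0..10 scale; as a stand-alone sketch (not the committed code):

    // Sketch only: 'result' is the network output in [0, 1).
    float toPointerValue(float result, bool ccw)
    {
        float value = result * 10.0f;          // 0.0 .. 10.0 scale used by the flow
        return ccw ? 10.0f - value : value;    // CCW dials run backwards, so mirror the reading;
                                               // a mirrored 0 becomes 10.0 and is wrapped to 0.0 later
    }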
@@ -701,6 +765,7 @@ bool ClassFlowCNNGeneral::doNeuralNetwork(string time)
                         }
                     } break;
 */
+/*
                 case DigitalHyprid10:
                     {
                         int _num, _nachkomma;
@@ -736,6 +801,7 @@ bool ClassFlowCNNGeneral::doNeuralNetwork(string time)
                             }
                         }
                     } break;
+*/
 
                 case DoubleHyprid10:
                     {
@@ -769,7 +835,7 @@ bool ClassFlowCNNGeneral::doNeuralNetwork(string time)
                             _fit = _val + _valminus;
 
                         }
-                        if (result > 10)
+                        if (result >= 10)
                             result = result - 10;
                         if (result < 0)
                             result = result + 10;
@@ -827,15 +893,17 @@ bool ClassFlowCNNGeneral::doNeuralNetwork(string time)
     
                         _num = tflite->GetOutClassification();
                         
-                        GENERAL[_ana]->ROI[i]->result_float = (float)_num / 10.0;
+                        if(GENERAL[_ana]->ROI[i]->CCW)
+                            GENERAL[_ana]->ROI[i]->result_float = 10 - ((float)_num / 10.0);                              
+                        else
+                            GENERAL[_ana]->ROI[i]->result_float = (float)_num / 10.0;
 
- 
                         _result_save_file = GENERAL[_ana]->ROI[i]->result_float;
 
                         
                         GENERAL[_ana]->ROI[i]->isReject = false;
                         
-                        printf("Result General(Analog)%i: %f\n", i, GENERAL[_ana]->ROI[i]->result_float); 
+                        printf("Result General(Analog)%i - CCW: %d -  %f\n", i, GENERAL[_ana]->ROI[i]->CCW, GENERAL[_ana]->ROI[i]->result_float); 
 
                         if (isLogImage)
                         {

+ 13 - 6
code/components/jomjol_flowcontroll/ClassFlowCNNGeneral.h

@@ -10,7 +10,6 @@ enum t_CNNType {
     Analogue,
     Analogue100,
     Digital,
-//    DigitalHyprid,
     DigitalHyprid10,
     DoubleHyprid10,
     Digital100,
@@ -24,18 +23,26 @@ protected:
     t_CNNType CNNType;
     std::vector<general*> GENERAL;
     float CNNGoodThreshold;
+    float AnalogFehler = 3.0;
+    float AnalogToDigtalFehler = 0.8;
+    float DigitalUnschaerfe = 0.2;
+    int DigitalBand = 3;
+    float DigitalAnalogerVorgaengerUebergangsbereich = 2;
+    float DigitalUebergangsbereichVorgaengerAnalogToDigit = 1; // war vorher 2
+    float DigitalUebergangsbereichVorgaenger = 0.7; // 9.3 - 0.7
 
     string cnnmodelfile;
     int modelxsize, modelysize, modelchannel;
     bool isLogImageSelect;
     string LogImageSelect;
     ClassFlowAlignment* flowpostalignment;
-//    ClassFlowPostProcessing *flowpostprocessing = NULL;
+
     bool SaveAllFiles;   
-//    bool extendedResolution;
 
-    int ZeigerEval(float zahl, int ziffer_vorgaenger);
-    int ZeigerEvalHybrid(float zahl, float zahl_vorgaenger, int eval_vorgaenger);
+    int ZeigerEvalAnalogNeu(float zahl, int ziffer_vorgaenger);
+    int ZeigerEvalAnalogToDigitNeu(float zahl, float ziffer_vorgaenger,  int eval_vorgaenger);
+    int ZeigerEvalHybridNeu(float zahl, float zahl_vorgaenger, int eval_vorgaenger, bool AnalogerVorgaenger = false);
+
 
 
     bool doNeuralNetwork(string time); 
@@ -50,7 +57,7 @@ public:
     bool doFlow(string time);
 
     string getHTMLSingleStep(string host);
-    string getReadout(int _analog, bool _extendedResolution = false, int prev = -1);   
+    string getReadout(int _analog, bool _extendedResolution = false, int prev = -1, float _vorgaengerAnalog = -1);   
 
     void DrawROI(CImageBasis *_zw); 
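To make the new threshold parameters concrete, a worked example of the analog evaluation (the calls are illustrative, since ZeigerEvalAnalogNeu() is a protected member; AnalogFehler = 3.0 as defaulted above, i.e. an uncertainty band of about +/-0.3 around the reading):

    int a = ZeigerEvalAnalogNeu(4.9f, 1);   // 4.6..5.2 straddles the 4/5 boundary and the less
                                            // significant pointer has already passed zero -> 5
    int b = ZeigerEvalAnalogNeu(4.9f, 8);   // same reading, but the predecessor still sits near 9 -> 4
    int c = ZeigerEvalAnalogNeu(4.3f, 5);   // 4.0..4.6 stays within one digit -> 4, no correction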
 

+ 3 - 0
code/components/jomjol_flowcontroll/ClassFlowControll.cpp

@@ -305,6 +305,7 @@ bool ClassFlowControll::doFlow(string time)
             if (i) i -= 1;    // vorheriger Schritt muss wiederholt werden (vermutlich Bilder aufnehmen)
             result = false;
             if (repeat > 5) {
+                LogFile.SwitchOnOff(true);
                 LogFile.WriteToFile("Wiederholung 5x nicht erfolgreich --> reboot");
                 doReboot();
                 // Schritt wurde 5x wiederholt --> reboot
@@ -493,6 +494,8 @@ bool ClassFlowControll::ReadParameter(FILE* pfile, string& aktparamgraph)
                 // reboot notwendig damit die neue wlan.ini auch benutzt wird !!!
                 fclose(pfile);
                 printf("do reboot\n");
+                LogFile.SwitchOnOff(true);
+                LogFile.WriteToFile("Reboot to activate new HOSTNAME.");
                 esp_restart();
                 hard_restart();                   
                 doReboot();

+ 4 - 4
code/components/jomjol_flowcontroll/ClassFlowDefineTypes.h

@@ -7,7 +7,7 @@ struct roi {
     int posx, posy, deltax, deltay;
     float result_float;
     int result_klasse;
-    bool isReject;
+    bool isReject, CCW;
     string name;
     CImageBasis *image, *image_org;
 };
@@ -33,9 +33,9 @@ struct NumberPost {
     bool checkDigitIncreaseConsistency;
     time_t lastvalue;
     string timeStamp;
-    float FlowRateAct;          // m3 / min
-    float PreValue;             // letzter Wert, der gut ausgelesen wurde
-    float Value;                // letzer ausgelesener Wert, inkl. Korrekturen
+    double FlowRateAct;          // m3 / min
+    double PreValue;             // letzter Wert, der gut ausgelesen wurde
+    double Value;                // letzer ausgelesener Wert, inkl. Korrekturen
     string ReturnRateValue;      // RückgabewertRate
     string ReturnChangeAbsolute;      // RückgabewertRate
     string ReturnRawValue;      // Rohwert (mit N & führenden 0)    
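The change of Value, PreValue and FlowRateAct from float to double matters because a complete meter reading already exceeds single precision (roughly 7 significant decimal digits); a minimal demonstration with hypothetical values:

    #include <cstdio>

    int main()
    {
        float  pf = 12345.678f, vf = 12345.679f;   // 8 significant digits
        double pd = 12345.678,  vd = 12345.679;

        std::printf("float : value=%.4f  delta=%.6f\n", vf, vf - pf);   // 12345.6787  0.000977
        std::printf("double: value=%.4f  delta=%.6f\n", vd, vd - pd);   // 12345.6790  0.001000
        return 0;
    }

With float, both the stored value and the consumption delta are quantised to steps of roughly 0.001 m³, which is exactly the resolution a meter with three decimal places needs; double removes that limit.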

+ 5 - 0
code/components/jomjol_flowcontroll/ClassFlowImage.cpp

@@ -63,7 +63,12 @@ void ClassFlowImage::LogImage(string logPath, string name, float *resultFloat, i
         if (*resultFloat < 0)
             sprintf(buf, "N.N_");
         else
+        {
             sprintf(buf, "%.1f_", *resultFloat);
+            if (strcmp(buf, "10.0_"))
+                sprintf(buf, "0.0_");
+        }
+            
 	} else if (resultInt != NULL) {
 		sprintf(buf, "%d_", *resultInt);
 	} else {

+ 93 - 10
code/components/jomjol_flowcontroll/ClassFlowMQTT.cpp

@@ -6,6 +6,7 @@
 #include "time_sntp.h"
 #include "interface_mqtt.h"
 #include "ClassFlowPostProcessing.h"
+#include "ClassLogFile.h"
 
 #include <time.h>
 
@@ -31,9 +32,7 @@ void ClassFlowMQTT::SetInitialParameter(void)
     ListFlowControll = NULL; 
     disabled = false;
     MQTTenable = false;
-    
-    
-
+    keepAlive = 600; // TODO This must be greater than the Flow Interval!
 }       
 
 ClassFlowMQTT::ClassFlowMQTT()
@@ -124,11 +123,50 @@ bool ClassFlowMQTT::ReadParameter(FILE* pfile, string& aktparamgraph)
         printf("InitMQTTInit\n");
         mainerrortopic = maintopic + "/connection";
         printf("Init MQTT with uri: %s, clientname: %s, user: %s, password: %s, maintopic: %s\n", uri.c_str(), clientname.c_str(), user.c_str(), password.c_str(), mainerrortopic.c_str());
-        MQTTInit(uri, clientname, user, password, mainerrortopic, 60); 
-        MQTTPublish(mainerrortopic, "connected", SetRetainFlag);
-        MQTTenable = true;
+        if (!MQTTInit(uri, clientname, user, password, mainerrortopic, keepAlive))
+        { // Failed
+            MQTTenable = false;
+            return true; // We need to return true even though we failed, otherwise the flow retries 5x and then reboots!
+        }
+    }
+
+    // Try sending mainerrortopic. If it fails, re-run init
+    if (!MQTTPublish(mainerrortopic, "connected", SetRetainFlag))
+    { // Failed
+        LogFile.WriteToFile("MQTT - Re-running init...!");
+        if (!MQTTInit(this->uri, this->clientname, this->user, this->password, this->mainerrortopic, keepAlive))
+        { // Failed
+            MQTTenable = false;
+            return false;
+        } 
+    }
+
+    // Try again and quit if it fails
+    if (!MQTTPublish(mainerrortopic, "connected", SetRetainFlag))
+    { // Failed
+        MQTTenable = false;
+        return false;
     }
+
+
+
    
+ /*   if (!MQTTPublish(mainerrortopic, "connected", SetRetainFlag))
+    { // Failed
+        LogFile.WriteToFile("MQTT - Could not publish connection status!");
+        MQTTenable = false;
+        return true; // We need to return true despite we failed, else it will retry 5x and then reboot!
+    }*/
+
+ /*   if(!MQTTPublish(_LWTContext, "", 1))
+    {
+        LogFile.WriteToFile("MQTT - Could not publish LWT!");
+        MQTTenable = false;
+        return true; // We need to return true despite we failed, else it will retry 5x and then reboot!
+    }*/
+
+
+    MQTTenable = true;
     return true;
 }
 
@@ -141,8 +179,44 @@ string ClassFlowMQTT::GetMQTTMainTopic()
 
 bool ClassFlowMQTT::doFlow(string zwtime)
 {
-    if (!MQTTenable)
-        return true;
+  //  if (!MQTTenable) {
+  //      LogFile.WriteToFile("MQTT not enabled!");
+  //
+  //      // Try again to init it
+  //   if (!MQTTInit(this->uri, this->clientname, this->user, this->password, this->mainerrortopic, keepAlive))
+  //      { // Failed
+  //          MQTTenable = false;
+  //          return true; // We need to return true despite we failed, else it will retry 5x and then reboot!
+  //      } 
+  //
+  //     if (!MQTTPublish(mainerrortopic, "connected", SetRetainFlag))
+  //      { // Failed
+  //          MQTTenable = false;
+  //          return true; // We need to return true despite we failed, else it will retry 5x and then reboot!
+  //      }
+  //      
+  //      LogFile.WriteToFile("MQTT is now enabled");
+  //      MQTTenable = true;
+  //  }
+
+
+    // Try sending mainerrortopic. If it fails, re-run init
+    if (!MQTTPublish(mainerrortopic, "connected", SetRetainFlag))
+    { // Failed
+        LogFile.WriteToFile("MQTT - Re-running init...!");
+        if (!MQTTInit(this->uri, this->clientname, this->user, this->password, this->mainerrortopic, keepAlive))
+        { // Failed
+            MQTTenable = false;
+            return true; // We need to return true even though we failed, otherwise the flow retries 5x and then reboots!
+        } 
+    }
+
+    // Try again and quit if it fails
+    if (!MQTTPublish(mainerrortopic, "connected", SetRetainFlag))
+    { // Failed
+        MQTTenable = false;
+        return true; // We need to return true even though we failed, otherwise the flow retries 5x and then reboots!
+    }
 
     std::string result;
     std::string resulterror = "";
@@ -153,7 +227,10 @@ bool ClassFlowMQTT::doFlow(string zwtime)
     string zw = "";
     string namenumber = "";
 
-    MQTTPublish(mainerrortopic, "connected");
+    // if (!MQTTPublish(mainerrortopic, "connected", SetRetainFlag))
+    //{ // Failed, skip other topics
+    //    return true; // We need to return true despite we failed, else it will retry 5x and then reboot!
+    //}
     
     zw = maintopic + "/" + "uptime";
     char uptimeStr[11];
@@ -163,13 +240,19 @@ bool ClassFlowMQTT::doFlow(string zwtime)
     zw = maintopic + "/" + "freeMem";
     char freeheapmem[11];
     sprintf(freeheapmem, "%zu", esp_get_free_heap_size());
-    MQTTPublish(zw, freeheapmem, SetRetainFlag);
+    if (!MQTTPublish(zw, freeheapmem, SetRetainFlag))
+    { // Failed, skip other topics
+        return true; // We need to return true even though we failed, otherwise the flow retries 5x and then reboots!
+    }
 
     zw = maintopic + "/" + "wifiRSSI";
     char rssi[11];
     sprintf(rssi, "%d", get_WIFI_RSSI());
     MQTTPublish(zw, rssi, SetRetainFlag);
 
+    zw = maintopic + "/" + "CPUtemp";
+    std::string cputemp = std::to_string(temperatureRead());
+    MQTTPublish(zw, cputemp, SetRetainFlag);
 
     if (flowpostprocessing)
     {

+ 1 - 0
code/components/jomjol_flowcontroll/ClassFlowMQTT.h

@@ -15,6 +15,7 @@ protected:
     std::string user, password; 
     int SetRetainFlag;
     bool MQTTenable;
+    int keepAlive;
 
     std::string maintopic, mainerrortopic; 
 	void SetInitialParameter(void);        
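The retry pattern added in ClassFlowMQTT.cpp (publish, re-init on failure, publish once more) could be wrapped as follows. This is a sketch only, assuming the bool-returning MQTTInit()/MQTTPublish() introduced by this change; keepAlive (600 s here) is expected to stay above the flow interval so the broker does not drop the session between rounds:

    // Hypothetical helper, not committed code.
    bool publishWithReinit(const std::string &topic, const std::string &payload, int retainFlag,
                           const std::string &uri, const std::string &clientname,
                           const std::string &user, const std::string &password,
                           const std::string &errorTopic, int keepAlive)
    {
        if (MQTTPublish(topic, payload, retainFlag))
            return true;                                                      // normal case
        if (!MQTTInit(uri, clientname, user, password, errorTopic, keepAlive))
            return false;                                                     // broker not reachable
        return MQTTPublish(topic, payload, retainFlag);                       // one retry after re-init
    }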

+ 68 - 24
code/components/jomjol_flowcontroll/ClassFlowPostProcessing.cpp

@@ -9,6 +9,7 @@
 #include <time.h>
 
 #include "time_sntp.h"
+//#define SERIAL_DEBUG // testing debug on serial enabled
 
 
 #define PREVALUE_TIME_FORMAT_OUTPUT "%Y-%m-%dT%H:%M:%S"
@@ -68,7 +69,7 @@ string ClassFlowPostProcessing::GetPreValue(std::string _number)
     return result;
 }
 
-void ClassFlowPostProcessing::SetPreValue(float zw, string _numbers, bool _extern)
+void ClassFlowPostProcessing::SetPreValue(double zw, string _numbers, bool _extern)
 {
     printf("SetPrevalue: %f, %s\n", zw, _numbers.c_str());
     for (int j = 0; j < NUMBERS.size(); ++j)
@@ -126,7 +127,7 @@ bool ClassFlowPostProcessing::LoadPreValue(void)
             {
                 if (NUMBERS[j]->name == name)
                 {
-                    NUMBERS[j]->PreValue = stof(zwvalue.c_str());
+                    NUMBERS[j]->PreValue = stod(zwvalue.c_str());
                     NUMBERS[j]->ReturnPreValue = RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma + 1);      // SIcherheitshalber 1 Stelle mehr, da ggf. Exgtended Resolution an ist (wird erst beim ersten Durchlauf gesetzt)
 
                     time_t tStart;
@@ -177,7 +178,7 @@ bool ClassFlowPostProcessing::LoadPreValue(void)
         fclose(pFile);
         printf("%s", zw);
         zwvalue = trim(std::string(zw));
-        NUMBERS[0]->PreValue = stof(zwvalue.c_str());
+        NUMBERS[0]->PreValue = stod(zwvalue.c_str());
 
         time_t tStart;
         int yy, month, dd, hh, mm, ss;
@@ -238,8 +239,9 @@ void ClassFlowPostProcessing::SavePreValue()
 
         _zw = NUMBERS[j]->name + "\t" + NUMBERS[j]->timeStamp + "\t" + RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma) + "\n";
         printf("Write PreValue Zeile: %s\n", _zw.c_str());
-
-        fputs(_zw.c_str(), pFile);
+        if (pFile) {
+            fputs(_zw.c_str(), pFile);
+        }
     }
 
     UpdatePreValueINI = false;
@@ -568,8 +570,10 @@ void ClassFlowPostProcessing::InitNUMBERS()
         NUMBERS.push_back(_number);
     }
 
-    for (int i = 0; i < NUMBERS.size(); ++i)
+    for (int i = 0; i < NUMBERS.size(); ++i) {
         printf("Number %s, Anz DIG: %d, Anz ANA %d\n", NUMBERS[i]->name.c_str(), NUMBERS[i]->AnzahlDigital, NUMBERS[i]->AnzahlAnalog);
+    }
+
 }
 
 string ClassFlowPostProcessing::ShiftDecimal(string in, int _decShift){
@@ -660,27 +664,35 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
                     previous_value = zw - 48;
             }
         }
-
+        #ifdef SERIAL_DEBUG
+            printf("After analog->getReadout: ReturnRaw %s\n", NUMBERS[j]->ReturnRawValue.c_str());  
+        #endif
         if (NUMBERS[j]->digit_roi && NUMBERS[j]->analog_roi)
             NUMBERS[j]->ReturnRawValue = "." + NUMBERS[j]->ReturnRawValue;
 
         if (NUMBERS[j]->digit_roi)
         {
             if (NUMBERS[j]->analog_roi) 
-                NUMBERS[j]->ReturnRawValue = flowDigit->getReadout(j, false, previous_value) + NUMBERS[j]->ReturnRawValue;
+                NUMBERS[j]->ReturnRawValue = flowDigit->getReadout(j, false, previous_value, NUMBERS[j]->analog_roi->ROI[0]->result_float) + NUMBERS[j]->ReturnRawValue;
             else
                 NUMBERS[j]->ReturnRawValue = flowDigit->getReadout(j, NUMBERS[j]->isExtendedResolution, previous_value);        // Extended Resolution nur falls es keine analogen Ziffern gibt
         }
-
+        #ifdef SERIAL_DEBUG
+            printf("After digital->getReadout: ReturnRaw %s\n", NUMBERS[j]->ReturnRawValue.c_str());  
+        #endif
         NUMBERS[j]->ReturnRawValue = ShiftDecimal(NUMBERS[j]->ReturnRawValue, NUMBERS[j]->DecimalShift);
 
-        printf("ReturnRaw %s", NUMBERS[j]->ReturnRawValue.c_str());  
-
+        #ifdef SERIAL_DEBUG
+            printf("After ShiftDecimal: ReturnRaw %s\n", NUMBERS[j]->ReturnRawValue.c_str());  
+        #endif
 
         if (IgnoreLeadingNaN)               
             while ((NUMBERS[j]->ReturnRawValue.length() > 1) && (NUMBERS[j]->ReturnRawValue[0] == 'N'))
                 NUMBERS[j]->ReturnRawValue.erase(0, 1);
 
+        #ifdef SERIAL_DEBUG
+            printf("After IgnoreLeadingNaN: ReturnRaw %s\n", NUMBERS[j]->ReturnRawValue.c_str());  
+        #endif
         NUMBERS[j]->ReturnValue = NUMBERS[j]->ReturnRawValue;
 
         if (findDelimiterPos(NUMBERS[j]->ReturnValue, "N") != std::string::npos)
@@ -690,18 +702,38 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
             else
                 continue; // es gibt keinen Zahl, da noch ein N vorhanden ist.
         }
-
+        #ifdef SERIAL_DEBUG
+            printf("After findDelimiterPos: ReturnValue %s\n", NUMBERS[j]->ReturnRawValue.c_str());  
+        #endif
         // Lösche führende Nullen (außer es ist nur noch einen 0)
         while ((NUMBERS[j]->ReturnValue.length() > 1) && (NUMBERS[j]->ReturnValue[0] == '0'))
             NUMBERS[j]->ReturnValue.erase(0, 1);
-
-        NUMBERS[j]->Value = std::stof(NUMBERS[j]->ReturnValue);
+        #ifdef SERIAL_DEBUG
+            printf("After removeLeadingZeros: ReturnValue %s\n", NUMBERS[j]->ReturnRawValue.c_str());  
+        #endif
+        NUMBERS[j]->Value = std::stod(NUMBERS[j]->ReturnValue);
+        #ifdef SERIAL_DEBUG
+            printf("After setting the Value: Value %f and as double is %f\n", NUMBERS[j]->Value, std::stod(NUMBERS[j]->ReturnValue));  
+        #endif
 
         if (NUMBERS[j]->checkDigitIncreaseConsistency)
         {
-            NUMBERS[j]->Value = checkDigitConsistency(NUMBERS[j]->Value, NUMBERS[j]->DecimalShift, NUMBERS[j]->analog_roi != NULL, NUMBERS[j]->PreValue);
+            if (flowDigit)
+            {
+                if (flowDigit->getCNNType() != Digital)
+                    printf("checkDigitIncreaseConsistency = true - ignored due to wrong CNN-Type (not Digital Classification)\n"); 
+                else 
+                    NUMBERS[j]->Value = checkDigitConsistency(NUMBERS[j]->Value, NUMBERS[j]->DecimalShift, NUMBERS[j]->analog_roi != NULL, NUMBERS[j]->PreValue);
+            }
+            else
+            {
+                printf("checkDigitIncreaseConsistency = true - no digital numbers defined!\n"); 
+            }
         }
 
+        #ifdef SERIAL_DEBUG
+            printf("After checkDigitIncreaseConsistency: Value %f\n", NUMBERS[j]->Value);  
+        #endif
 
 
         if (!NUMBERS[j]->AllowNegativeRates)
@@ -714,7 +746,9 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
                 continue;
             }
         }
-
+        #ifdef SERIAL_DEBUG
+            printf("After AllowNegativeRates: Value %f\n", NUMBERS[j]->Value);  
+        #endif
         double difference = difftime(imagetime, NUMBERS[j]->lastvalue);      // in Sekunden
         difference /= 60;  
         NUMBERS[j]->FlowRateAct = (NUMBERS[j]->Value - NUMBERS[j]->PreValue) / difference;
@@ -722,7 +756,7 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
 
         if (NUMBERS[j]->useMaxRateValue && PreValueUse && NUMBERS[j]->PreValueOkay)
         {
-            float _ratedifference;  
+            double _ratedifference;  
             if (NUMBERS[j]->RateType == RateChange)
                 _ratedifference = NUMBERS[j]->FlowRateAct;
             else
@@ -737,7 +771,9 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
                 continue;
             }
         }
-
+        #ifdef SERIAL_DEBUG
+           printf("After MaxRateCheck: Value %f\n", NUMBERS[j]->Value);  
+        #endif
         NUMBERS[j]->ReturnChangeAbsolute = RundeOutput(NUMBERS[j]->Value - NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);                                                
         NUMBERS[j]->lastvalue = imagetime;
         NUMBERS[j]->PreValue = NUMBERS[j]->Value;
@@ -815,7 +851,7 @@ string ClassFlowPostProcessing::getReadoutParam(bool _rawValue, bool _noerror, i
     return NUMBERS[_number]->ReturnValue;
 }
 
-string ClassFlowPostProcessing::RundeOutput(float _in, int _anzNachkomma){
+string ClassFlowPostProcessing::RundeOutput(double _in, int _anzNachkomma){
     std::stringstream stream;
     int _zw = _in;    
 //    printf("AnzNachkomma: %d\n", _anzNachkomma);
@@ -839,7 +875,7 @@ string ClassFlowPostProcessing::RundeOutput(float _in, int _anzNachkomma){
 }
 
 
-string ClassFlowPostProcessing::ErsetzteN(string input, float _prevalue)
+string ClassFlowPostProcessing::ErsetzteN(string input, double _prevalue)
 {
     int posN, posPunkt;
     int pot, ziffer;
@@ -870,7 +906,7 @@ string ClassFlowPostProcessing::ErsetzteN(string input, float _prevalue)
     return input;
 }
 
-float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamshift, bool _isanalog, float _preValue){
+float ClassFlowPostProcessing::checkDigitConsistency(double input, int _decilamshift, bool _isanalog, double _preValue){
     int aktdigit, olddigit;
     int aktdigit_before, olddigit_before;
     int pot, pot_max;
@@ -882,8 +918,14 @@ float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamsh
     {
         pot++;
     }
+    #ifdef SERIAL_DEBUG
+        printf("checkDigitConsistency: pot=%d, decimalshift=%d\n", pot, _decilamshift);
+    #endif
     pot_max = ((int) log10(input)) + 1;
-
+    double not_checked_input = floor(input * pow(10, pot)) / pow(10, pot);
+    #ifdef SERIAL_DEBUG
+        printf("checkDigitConsistency: not_checked_input=%f\n", not_checked_input);
+    #endif
     while (pot <= pot_max)
     {
         zw = input / pow(10, pot-1);
@@ -912,11 +954,13 @@ float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamsh
                 input = input + ((float) (1)) * pow(10, pot);   // addiere 1 an der Stelle
             }
         }
-
+        #ifdef SERIAL_DEBUG
+            printf("checkDigitConsistency: input=%f", input);
+        #endif
         pot++;
     }
 
-    return input;
+    return not_checked_input + input;
 }
 
 string ClassFlowPostProcessing::getReadoutRate(int _number)
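As a worked example of the rate handling above (hypothetical numbers):

    double preValue = 12345.678;                        // m3, last good reading
    double value    = 12345.688;                        // m3, current reading
    double minutes  = 5.0;                              // time between the two images
    double flowRate = (value - preValue) / minutes;     // 0.002 m3/min = FlowRateAct

When useMaxRateValue is set, a rate (or absolute change, depending on RateType) above the configured limit marks the reading as an error, and the previous value is kept for the next round.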

+ 4 - 4
code/components/jomjol_flowcontroll/ClassFlowPostProcessing.h

@@ -32,9 +32,9 @@ protected:
     bool LoadPreValue(void);
     string ShiftDecimal(string in, int _decShift);
 
-    string ErsetzteN(string, float _prevalue);
-    float checkDigitConsistency(float input, int _decilamshift, bool _isanalog, float _preValue);
-    string RundeOutput(float _in, int _anzNachkomma);
+    string ErsetzteN(string, double _prevalue);
+    float checkDigitConsistency(double input, int _decilamshift, bool _isanalog, double _preValue);
+    string RundeOutput(double _in, int _anzNachkomma);
 
     void InitNUMBERS();
     void handleDecimalSeparator(string _decsep, string _value);
@@ -58,7 +58,7 @@ public:
     string getReadoutTimeStamp(int _number = 0);
     void SavePreValue();
     string GetPreValue(std::string _number = "");
-    void SetPreValue(float zw, string _numbers, bool _extern = false);
+    void SetPreValue(double zw, string _numbers, bool _extern = false);
 
     std::string GetJSON(std::string _id = "", std::string _mac = "", std::string _lineend = "\n");
 

+ 47 - 2
code/components/jomjol_helper/Helper.cpp

@@ -209,6 +209,21 @@ size_t findDelimiterPos(string input, string delimiter)
 	return pos;
 }
 
+void DeleteFile(string fn)
+{
+//	ESP_LOGI(logTag, "Deleting file : %s", fn.c_str());
+	/* Delete file */
+	FILE* fpSourceFile = OpenFileAndWait(fn.c_str(), "rb");
+	if (!fpSourceFile)	// Sourcefile existiert nicht sonst gibt es einen Fehler beim Kopierversuch!
+	{
+		printf("DeleteFile: File %s existiert nicht!\n", fn.c_str());
+		return;
+	}
+	fclose(fpSourceFile);
+
+	unlink(fn.c_str());    
+}
+
 
 void CopyFile(string input, string output)
 {
@@ -243,18 +258,48 @@ void CopyFile(string input, string output)
 	// Close The Files
 	fclose(fpSourceFile);
 	fclose(fpTargetFile);
+	printf("File copied: %s to %s", input.c_str(), output.c_str());
 }
 
+string getFileFullFileName(string filename)
+{
+	size_t lastpos = filename.find_last_of('/');
+
+	if (lastpos == string::npos)
+		return "";
+
+//	printf("Last position: %d\n", lastpos);
+
+	string zw = filename.substr(lastpos + 1, filename.size() - lastpos);
+
+	return zw;
+}
+
+string getDirectory(string filename)
+{
+	size_t lastpos = filename.find('/');
+
+	if (lastpos == string::npos)
+		return "";
+
+//	printf("Directory: %d\n", lastpos);
+
+	string zw = filename.substr(0, lastpos);
+	return zw;
+}
 
 string getFileType(string filename)
 {
-	int lastpos = filename.find(".", 0);
-	int neu_pos;
+	size_t lastpos = filename.find(".", 0);
+	size_t neu_pos;
-	while ((neu_pos = filename.find(".", lastpos + 1)) > -1)
+	while ((neu_pos = filename.find(".", lastpos + 1)) != string::npos)
 	{
 		lastpos = neu_pos;
 	}
 
+	if (lastpos == string::npos)
+		return "";
+
 	string zw = filename.substr(lastpos + 1, filename.size() - lastpos);
 	zw = toUpper(zw);
 

+ 3 - 0
code/components/jomjol_helper/Helper.h

@@ -10,6 +10,7 @@ std::string FormatFileName(std::string input);
 void FindReplace(std::string& line, std::string& oldString, std::string& newString);
 
 void CopyFile(string input, string output);
+void DeleteFile(string fn);
 
 FILE* OpenFileAndWait(const char* nm, const char* _mode, int _waitsec = 1);
 
@@ -19,6 +20,8 @@ string trim(string istring, string adddelimiter = "");
 bool ctype_space(const char c, string adddelimiter);
 
 string getFileType(string filename);
+string getFileFullFileName(string filename);
+string getDirectory(string filename);
 
 int mkdir_r(const char *dir, const mode_t mode);
 int removeFolder(const char* folderPath, const char* logTag);

+ 2 - 1
code/components/jomjol_logfile/ClassLogFile.cpp

@@ -73,7 +73,7 @@ void ClassLogFile::WriteToDedicatedFile(std::string _fn, std::string info, bool
 
 //    pFile = OpenFileAndWait(_fn.c_str(), "a"); 
     pFile = fopen(_fn.c_str(), "a+");
-    printf("Logfile opened: %s\n", _fn.c_str());
+//    printf("Logfile opened: %s\n", _fn.c_str());
 
     if (pFile!=NULL) {
         if (_time)
@@ -129,6 +129,7 @@ void ClassLogFile::WriteToFile(std::string info, bool _time)
     std::string logpath = logroot + "/" + buffer; 
     
     WriteToDedicatedFile(logpath, info, _time);
+    printf("%s\n", info.c_str());
 }
 
 std::string ClassLogFile::GetCurrentFileName()

+ 104 - 15
code/components/jomjol_mqtt/interface_mqtt.cpp

@@ -19,18 +19,43 @@ esp_mqtt_event_id_t esp_mmqtt_ID = MQTT_EVENT_ANY;
 bool mqtt_connected = false;
 esp_mqtt_client_handle_t client = NULL;
 
-void MQTTPublish(std::string _key, std::string _content, int retained_flag){
-    if (client && mqtt_connected) {
-        int msg_id;
-        std::string zw;
-        msg_id = esp_mqtt_client_publish(client, _key.c_str(), _content.c_str(), 0, 1, retained_flag);
-        zw = "sent publish successful in MQTTPublish, msg_id=" + std::to_string(msg_id) + ", " + _key + ", " + _content;
-        if (debugdetail) LogFile.WriteToFile(zw);
-        ESP_LOGD(TAG_INTERFACEMQTT, "sent publish successful in MQTTPublish, msg_id=%d, %s, %s", msg_id, _key.c_str(), _content.c_str());
-    }
-    else {
-        ESP_LOGW(TAG_INTERFACEMQTT, "Problem with Publish, client=%d, mqtt_connected %d", (int) client, (int) mqtt_connected);
+bool MQTTPublish(std::string _key, std::string _content, int retained_flag){
+  
+  //  if (!client) {
+  //      LogFile.WriteToFile("MQTT - client not initialized!");  
+  //      return false;      
+  //  }
+  //  LogFile.WriteToFile("MQTT - client initialized!");  // Debug
+  //
+  //  if (!mqtt_connected) {
+  //      LogFile.WriteToFile("MQTT - Can not publish, not connected!");
+  //      ESP_LOGW(TAG_INTERFACEMQTT, "Problem with Publish, client=%d, mqtt_connected %d", (int) client, (int) mqtt_connected);
+  //      return false;            
+  //  }
+  //  LogFile.WriteToFile("MQTT - connected!");  // Debug
+
+ /*   if (client && mqtt_connected) {
+        LogFile.WriteToFile("MQTT - connected!");  // Debug
     }
+    else { // init needed
+        if (!MQTTInit(this->uri, this->clientname, this->user, password, mainerrortopic, keepAlive)) // validate{
+        { // Failed
+            return false;
+        }
+    }*/
+
+
+    int msg_id;
+    std::string zw;
+    msg_id = esp_mqtt_client_publish(client, _key.c_str(), _content.c_str(), 0, 1, retained_flag);
+    if (msg_id < 0) {
+        LogFile.WriteToFile("MQTT - Failed to publish '" + _key + "'!");
+        return false;
+    }
+    zw = "MQTT - sent publish successful in MQTTPublish, msg_id=" + std::to_string(msg_id) + ", " + _key + ", " + _content;
+    if (debugdetail) LogFile.WriteToFile(zw);
+    ESP_LOGD(TAG_INTERFACEMQTT, "sent publish successful in MQTTPublish, msg_id=%d, %s, %s", msg_id, _key.c_str(), _content.c_str());
+    return true;
 }
 
 
@@ -90,11 +115,23 @@ static void mqtt_event_handler(void *handler_args, esp_event_base_t base, int32_
     mqtt_event_handler_cb((esp_mqtt_event_handle_t) event_data);
 }
 
-void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, std::string _password, std::string _LWTContext, int _keepalive){
+
+bool MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, std::string _password, std::string _LWTContext, int _keepalive){
     std::string _zwmessage = "connection lost";
 
     int _lzw = _zwmessage.length();
 
+/*    LWTContext = _LWTContext;
+
+    mqtt_cfg.uri = _mqttURI.c_str();
+    mqtt_cfg.client_id = _clientid.c_str();
+    mqtt_cfg.lwt_topic = _LWTContext.c_str();
+    mqtt_cfg.lwt_msg = _zwmessage.c_str();
+    mqtt_cfg.lwt_retain = 1;
+    mqtt_cfg.lwt_msg_len = _lzw;
+    mqtt_cfg.keepalive = _keepalive;
+*/
+
     esp_mqtt_client_config_t mqtt_cfg = {
         .uri = _mqttURI.c_str(),
         .client_id = _clientid.c_str(),
@@ -105,28 +142,78 @@ void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, st
         .keepalive = _keepalive
     };
 
+    LogFile.WriteToFile("MQTT - Init");
+
     if (_user.length() && _password.length()){
         mqtt_cfg.username = _user.c_str();
         mqtt_cfg.password = _password.c_str();
         ESP_LOGI(TAG_INTERFACEMQTT, "Connect to MQTT: %s, %s", mqtt_cfg.username, mqtt_cfg.password);
     };
 
+    MQTTdestroy();
     client = esp_mqtt_client_init(&mqtt_cfg);
     if (client)
     {
         if (esp_mqtt_client_register_event(client, esp_mmqtt_ID, mqtt_event_handler, client) != ESP_OK)
+        {
             LogFile.WriteToFile("MQTT - Could not register event!");
+            return false;
+        }
         if (esp_mqtt_client_start(client) != ESP_OK)
+        {
             LogFile.WriteToFile("MQTT - Could not start client!");
+            return false;
+        }
+
+       /* if(!MQTTPublish(_LWTContext, "", 1))
+        {
+            LogFile.WriteToFile("MQTT - Could not publish LWT!");
+            return false;
+        }*/
+    }
+    else
+    {
+        LogFile.WriteToFile("MQTT - Could not Init client!");
+        return false;
+    }
+
+    LogFile.WriteToFile("MQTT - Init successful");
+    return true;
+}
+
+/*
+void MQTTReConnect(){
+    std::string _zwmessage = "connection lost";
+    int _lzw = _zwmessage.length();
+
+>>>>>>> Stashed changes
+    client = esp_mqtt_client_init(&mqtt_cfg);
+    if (client)
+    {
+        if (esp_mqtt_client_register_event(client, esp_mmqtt_ID, mqtt_event_handler, client) != ESP_OK)
+            LogFile.WriteToFile("MQTT - Could not register event!");
+        if (esp_mqtt_client_start(client) != ESP_OK)
+            LogFile.WriteToFile("MQTT - Could not start client!");
+
+<<<<<<< Updated upstream
+        if(MQTTPublish(_LWTContext, "", 1)) {
+            LogFile.WriteToFile("MQTT - Client init successful");
+        }
+=======
+        if (mqtt_connected)
+            MQTTPublish(LWTContext, "", 1);
+        else
+            LogFile.WriteToFile("Problem with (Re)Connection not successful!");
 
-        MQTTPublish(_LWTContext, "", 1);
+>>>>>>> Stashed changes
     }
     else
     {
-        LogFile.WriteToFile("MQTT - Could not Init MQTT Client!");
+        LogFile.WriteToFile("MQTT - Could not Init client!");
     }
 
 }
+*/
 
 void MQTTdestroy() {
     if (client != NULL) {
@@ -185,6 +272,7 @@ void MQTTregisterSubscribeFunction(std::string topic, std::function<bool(std::st
 
 void MQTTconnected(){
     if (mqtt_connected) {
+        LogFile.WriteToFile("MQTT - Connected");
         if (connectFunktionMap != NULL) {
             for(std::map<std::string, std::function<void()>>::iterator it = connectFunktionMap->begin(); it != connectFunktionMap->end(); ++it) {
                 it->second();
@@ -192,10 +280,11 @@ void MQTTconnected(){
             }
         }
 
-        if (subscribeFunktionMap != NULL) {
+       if (subscribeFunktionMap != NULL) {
             for(std::map<std::string, std::function<bool(std::string, char*, int)>>::iterator it = subscribeFunktionMap->begin(); it != subscribeFunktionMap->end(); ++it) {
                 int msg_id = esp_mqtt_client_subscribe(client, it->first.c_str(), 0);
                 ESP_LOGD(TAG_INTERFACEMQTT, "topic %s subscribe successful, msg_id=%d", it->first.c_str(), msg_id);
+                LogFile.WriteToFile("MQTT - topic " + it->first + " subscribe successful, msg_id=" + std::to_string(msg_id));
             }
         }
     }
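With MQTTInit() and MQTTPublish() now returning bool, callers can react to failures instead of publishing blindly. A minimal sketch (broker URI, client id and topics are placeholders, not values from the commit):

    if (!MQTTInit("mqtt://192.168.178.10:1883", "watermeter", "", "", "wasserzaehler/connection", 60)) {
        LogFile.WriteToFile("MQTT - init failed, will retry on the next round");
    }
    else if (!MQTTPublish("wasserzaehler/main/value", "123.4", 1)) {
        LogFile.WriteToFile("MQTT - publish failed");
    }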

+ 2 - 2
code/components/jomjol_mqtt/interface_mqtt.h

@@ -5,12 +5,12 @@
 #include <map>
 #include <functional>
 
-void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, std::string _password, std::string _LWTContext, int _keepalive);
+bool MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, std::string _password, std::string _LWTContext, int _keepalive);
 void MQTTdestroy();
 
 //void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user = "", std::string _password = "");
 
-void MQTTPublish(std::string _key, std::string _content, int retained_flag = 1);            // retained Flag as Standart
+bool MQTTPublish(std::string _key, std::string _content, int retained_flag = 1);            // retained flag by default
 
 bool MQTTisConnected();
 

+ 66 - 1
code/components/jomjol_tfliteclass/server_tflite.cpp

@@ -21,6 +21,7 @@
 #include "server_GPIO.h"
 
 #include "server_file.h"
+#include "connect_wlan.h"
 
 #define DEBUG_DETAIL_ON       
 
@@ -590,6 +591,55 @@ esp_err_t handler_statusflow(httpd_req_t *req)
     return ESP_OK;
 };
 
+esp_err_t handler_cputemp(httpd_req_t *req)
+{
+#ifdef DEBUG_DETAIL_ON       
+    LogFile.WriteHeapInfo("handler_cputemp - Start");       
+#endif
+
+    const char* resp_str;
+    char cputemp[20];
+    
+    sprintf(cputemp, "CPU Temp: %4.1f°C", temperatureRead());
+
+    resp_str = cputemp;
+
+    httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
+    httpd_resp_send(req, resp_str, strlen(resp_str));   
+    /* Respond with an empty chunk to signal HTTP response completion */
+    httpd_resp_send_chunk(req, NULL, 0);      
+
+#ifdef DEBUG_DETAIL_ON       
+    LogFile.WriteHeapInfo("handler_cputemp - End");       
+#endif
+
+    return ESP_OK;
+};
+
+esp_err_t handler_rssi(httpd_req_t *req)
+{
+#ifdef DEBUG_DETAIL_ON       
+    LogFile.WriteHeapInfo("handler_rssi - Start");       
+#endif
+
+    const char* resp_str;
+    char rssi[20];
+
+    sprintf(rssi, "RSSI: %idBm", get_WIFI_RSSI());
+
+    resp_str = rssi;
+
+    httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
+    httpd_resp_send(req, resp_str, strlen(resp_str));   
+    /* Respond with an empty chunk to signal HTTP response completion */
+    httpd_resp_send_chunk(req, NULL, 0);      
+
+#ifdef DEBUG_DETAIL_ON       
+    LogFile.WriteHeapInfo("handler_rssi - End");       
+#endif
+
+    return ESP_OK;
+};
 
 esp_err_t handler_prevalue(httpd_req_t *req)
 {
@@ -643,7 +693,7 @@ esp_err_t handler_prevalue(httpd_req_t *req)
     httpd_resp_send_chunk(req, NULL, 0);      
 
 #ifdef DEBUG_DETAIL_ON       
-    LogFile.WriteHeapInfo("handler_prevalue - Start");       
+    LogFile.WriteHeapInfo("handler_prevalue - End");       
 #endif
 
     return ESP_OK;
@@ -766,11 +816,26 @@ void register_server_tflite_uri(httpd_handle_t server)
     camuri.user_ctx  = (void*) "Light Off"; 
     httpd_register_uri_handler(server, &camuri);  
     
+    camuri.uri       = "/cputemp.html";
+    camuri.handler   = handler_cputemp;
+    camuri.user_ctx  = (void*) "Light Off"; 
+    httpd_register_uri_handler(server, &camuri);  
+
+    camuri.uri       = "/rssi.html";
+    camuri.handler   = handler_rssi;
+    camuri.user_ctx  = (void*) "Light Off"; 
+    httpd_register_uri_handler(server, &camuri);  
+
     camuri.uri       = "/editflow.html";
     camuri.handler   = handler_editflow;
     camuri.user_ctx  = (void*) "EditFlow"; 
     httpd_register_uri_handler(server, &camuri);     
 
+    camuri.uri       = "/value.html";
+    camuri.handler   = handler_wasserzaehler;
+    camuri.user_ctx  = (void*) "Value"; 
+    httpd_register_uri_handler(server, &camuri);  
+
     camuri.uri       = "/wasserzaehler.html";
     camuri.handler   = handler_wasserzaehler;
     camuri.user_ctx  = (void*) "Wasserzaehler"; 
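The two new status pages are plain-text endpoints, and /value.html is registered here as an alias for the existing handler_wasserzaehler. Based on the sprintf format strings above, the responses look roughly like this (values are of course illustrative):

    GET /cputemp.html  ->  CPU Temp: 53.3°C
    GET /rssi.html     ->  RSSI: -67dBm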

+ 6 - 1
code/components/tflite-lib/CMakeLists.txt

@@ -25,7 +25,8 @@ list(REMOVE_ITEM srcs_kernels
           "${tfmicro_kernels_dir}/depthwise_conv.cc"
           "${tfmicro_kernels_dir}/fully_connected.cc"
           "${tfmicro_kernels_dir}/mul.cc"
-          "${tfmicro_kernels_dir}/pooling.cc")
+          "${tfmicro_kernels_dir}/pooling.cc"
+          "${tfmicro_kernels_dir}/softmax.cc")
 
 FILE(GLOB esp_nn_kernels
           "${tfmicro_kernels_dir}/esp_nn/*.cc")
@@ -38,6 +39,10 @@ set(lib_srcs
           "${tflite_dir}/kernels/kernel_util.cc"
           "${tflite_dir}/micro/memory_planner/greedy_memory_planner.cc"
           "${tflite_dir}/micro/memory_planner/linear_memory_planner.cc"
+          "${tflite_dir}/micro/arena_allocator/non_persistent_arena_buffer_allocator.cc"
+          "${tflite_dir}/micro/arena_allocator/persistent_arena_buffer_allocator.cc"
+          "${tflite_dir}/micro/arena_allocator/recording_single_arena_buffer_allocator.cc"
+          "${tflite_dir}/micro/arena_allocator/single_arena_buffer_allocator.cc"
           "${tflite_dir}/c/common.cc"
           "${tflite_dir}/core/api/error_reporter.cc"
           "${tflite_dir}/core/api/flatbuffer_conversions.cc"

+ 6 - 0
code/components/tflite-lib/tensorflow/lite/builtin_ops.h

@@ -179,6 +179,12 @@ typedef enum {
   kTfLiteBuiltinMultinomial = 149,
   kTfLiteBuiltinGelu = 150,
   kTfLiteBuiltinDynamicUpdateSlice = 151,
+  kTfLiteBuiltinRelu0To1 = 152,
+  kTfLiteBuiltinUnsortedSegmentProd = 153,
+  kTfLiteBuiltinUnsortedSegmentMax = 154,
+  kTfLiteBuiltinUnsortedSegmentSum = 155,
+  kTfLiteBuiltinAtan2 = 156,
+  kTfLiteBuiltinUnsortedSegmentMin = 157,
 } TfLiteBuiltinOperator;
 
 #ifdef __cplusplus

+ 7 - 1
code/components/tflite-lib/tensorflow/lite/c/c_api_types.h

@@ -113,7 +113,13 @@ typedef struct TfLiteQuantizationParams {
 } TfLiteQuantizationParams;
 
 // --------------------------------------------------------------------------
-// Opaque types used by c_api_opaque.h.
+// Opaque types used by c_api.h, c_api_opaque.h and common.h.
+
+// TfLiteOpaqueContext is an opaque version of TfLiteContext;
+typedef struct TfLiteOpaqueContext TfLiteOpaqueContext;
+
+// TfLiteOpaqueNode is an opaque version of TfLiteNode;
+typedef struct TfLiteOpaqueNode TfLiteOpaqueNode;
 
 // TfLiteOpaqueTensor is an opaque version of TfLiteTensor;
 typedef struct TfLiteOpaqueTensor TfLiteOpaqueTensor;

+ 24 - 10
code/components/tflite-lib/tensorflow/lite/c/common.cc

@@ -14,7 +14,11 @@ limitations under the License.
 ==============================================================================*/
 
 #include "tensorflow/lite/c/common.h"
+
 #include "tensorflow/lite/c/c_api_types.h"
+#ifdef TF_LITE_TENSORFLOW_PROFILER
+#include "tensorflow/lite/tensorflow_profiler_logger.h"
+#endif
 
 #ifndef TF_LITE_STATIC_MEMORY
 #include <stdlib.h>
@@ -99,7 +103,12 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); }
 void TfLiteTensorDataFree(TfLiteTensor* t) {
   if (t->allocation_type == kTfLiteDynamic ||
       t->allocation_type == kTfLitePersistentRo) {
-    free(t->data.raw);
+    if (t->data.raw) {
+#ifdef TF_LITE_TENSORFLOW_PROFILER
+      tflite::OnTfLiteTensorDealloc(t);
+#endif
+      free(t->data.raw);
+    }
   }
   t->data.raw = nullptr;
 }
@@ -161,7 +170,7 @@ void TfLiteTensorFree(TfLiteTensor* t) {
   t->dims = nullptr;
 
   if (t->dims_signature) {
-    TfLiteIntArrayFree((TfLiteIntArray *) t->dims_signature);
+    TfLiteIntArrayFree((TfLiteIntArray*)t->dims_signature);
   }
   t->dims_signature = nullptr;
 
@@ -191,16 +200,12 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
 }
 
 TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst) {
-  if (!src || !dst)
-    return kTfLiteOk;
-  if (src->bytes != dst->bytes)
-    return kTfLiteError;
-  if (src == dst)
-    return kTfLiteOk;
+  if (!src || !dst) return kTfLiteOk;
+  if (src->bytes != dst->bytes) return kTfLiteError;
+  if (src == dst) return kTfLiteOk;
 
   dst->type = src->type;
-  if (dst->dims)
-    TfLiteIntArrayFree(dst->dims);
+  if (dst->dims) TfLiteIntArrayFree(dst->dims);
   dst->dims = TfLiteIntArrayCopy(src->dims);
   memcpy(dst->data.raw, src->data.raw, src->bytes);
   dst->buffer_handle = src->buffer_handle;
@@ -218,8 +223,17 @@ void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
   // TODO(b/145340303): Tensor data should be aligned.
   if (!tensor->data.raw) {
     tensor->data.raw = (char*)malloc(num_bytes);
+#ifdef TF_LITE_TENSORFLOW_PROFILER
+    tflite::OnTfLiteTensorAlloc(tensor, num_bytes);
+#endif
   } else if (num_bytes > tensor->bytes) {
+#ifdef TF_LITE_TENSORFLOW_PROFILER
+    tflite::OnTfLiteTensorDealloc(tensor);
+#endif
     tensor->data.raw = (char*)realloc(tensor->data.raw, num_bytes);
+#ifdef TF_LITE_TENSORFLOW_PROFILER
+    tflite::OnTfLiteTensorAlloc(tensor, num_bytes);
+#endif
   }
   tensor->bytes = num_bytes;
 }

+ 32 - 3
code/components/tflite-lib/tensorflow/lite/c/common.h

@@ -173,9 +173,9 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
     }                                                 \
   } while (false)
 #else  // TF_LITE_STRIP_ERROR_STRINGS
-#define UNUSED(...) (void)sizeof(#__VA_ARGS__)
-#define TF_LITE_KERNEL_LOG(context, ...) UNUSED(__VA_ARGS__)
-#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) UNUSED(__VA_ARGS__)
+#define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__)
+#define TF_LITE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__)
+#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__)
 #endif  // TF_LITE_STRIP_ERROR_STRINGS
 
 // Check whether value is true, and if not return kTfLiteError from
@@ -842,6 +842,12 @@ typedef struct TfLiteContext {
                                    size_t* bytes);
 } TfLiteContext;
 
+// `TfLiteRegistrationExternal` is an external version of `TfLiteRegistration`
+// for C API which doesn't use internal types (such as `TfLiteContext`) but only
+// uses stable API types (such as `TfLiteOpaqueContext`). The purpose of each
+// field is the exactly the same as with `TfLiteRegistration`.
+typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal;
+
 typedef struct TfLiteRegistration {
   // Initializes the op from serialized data.
   // Called only *once* for the lifetime of the op, so any one-time allocations
@@ -903,8 +909,31 @@ typedef struct TfLiteRegistration {
   // Note: It is the responsibility of the registration binder to set this
   // properly.
   int version;
+
+  // The external version of `TfLiteRegistration`. Since we can't use internal
+  // types (such as `TfLiteContext`) for C API to maintain ABI stability.
+  // C API user will provide `TfLiteRegistrationExternal` to implement custom
+  // ops. We keep it inside of `TfLiteRegistration` and use it to route
+  // callbacks properly.
+  TfLiteRegistrationExternal* registration_external;
 } TfLiteRegistration;
 
+// Old version of `TfLiteRegistration` to maintain binary backward
+// compatibility.
+// WARNING: This structure is deprecated / not an official part of the API.
+// It should be only used for binary backward compatibility.
+typedef struct TfLiteRegistration_V1 {
+  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
+  void (*free)(TfLiteContext* context, void* buffer);
+  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
+  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
+  const char* (*profiling_string)(const TfLiteContext* context,
+                                  const TfLiteNode* node);
+  int32_t builtin_code;
+  const char* custom_name;
+  int version;
+} TfLiteRegistration_V1;
+
 // The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
 // values should be 1, 2, 4, 8, ...etc.
 typedef enum TfLiteDelegateFlags {

+ 33 - 15
code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.cc

@@ -493,6 +493,11 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
       return ParseSquare(op, error_reporter, allocator, builtin_data);
     }
 
+    case BuiltinOperator_SQUARED_DIFFERENCE: {
+      return ParseSquaredDifference(op, error_reporter, allocator,
+                                    builtin_data);
+    }
+
     case BuiltinOperator_SQUEEZE: {
       return ParseSqueeze(op, error_reporter, allocator, builtin_data);
     }
@@ -840,14 +845,25 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
     // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
     // ok for now, since there is no call implementation either.
     case BuiltinOperator_CALL:
+    case BuiltinOperator_COMPLEX_ABS:
     case BuiltinOperator_CONCAT_EMBEDDINGS:
     case BuiltinOperator_COS:
     case BuiltinOperator_CUSTOM:
+    case BuiltinOperator_DENSIFY:
+    case BuiltinOperator_DYNAMIC_UPDATE_SLICE:
     case BuiltinOperator_EMBEDDING_LOOKUP:
     case BuiltinOperator_EQUAL:
+    case BuiltinOperator_HASHTABLE_FIND:
+    case BuiltinOperator_HASHTABLE_IMPORT:
+    case BuiltinOperator_HASHTABLE_SIZE:
+    case BuiltinOperator_IMAG:
     case BuiltinOperator_MATRIX_DIAG:
     case BuiltinOperator_MATRIX_SET_DIAG:
+    case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
+    case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
     case BuiltinOperator_RELU_N1_TO_1:
+    case BuiltinOperator_RELU_0_TO_1:
+    case BuiltinOperator_SCATTER_ND:
     case BuiltinOperator_SELECT:
     case BuiltinOperator_SELECT_V2:
     case BuiltinOperator_SLICE:
@@ -855,23 +871,17 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
     case BuiltinOperator_TOPK_V2:
     case BuiltinOperator_TRANSPOSE:
     case BuiltinOperator_RANGE:
-    case BuiltinOperator_SQUARED_DIFFERENCE:
-    case BuiltinOperator_REVERSE_V2:
-    case BuiltinOperator_WHERE:
     case BuiltinOperator_RANK:
-    case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
-    case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
-    case BuiltinOperator_SCATTER_ND:
-    case BuiltinOperator_DENSIFY:
-    case BuiltinOperator_SEGMENT_SUM:
-    case BuiltinOperator_RFFT2D:
-    case BuiltinOperator_IMAG:
     case BuiltinOperator_REAL:
-    case BuiltinOperator_COMPLEX_ABS:
-    case BuiltinOperator_HASHTABLE_FIND:
-    case BuiltinOperator_HASHTABLE_IMPORT:
-    case BuiltinOperator_HASHTABLE_SIZE:
-    case BuiltinOperator_DYNAMIC_UPDATE_SLICE:
+    case BuiltinOperator_RFFT2D:
+    case BuiltinOperator_SEGMENT_SUM:
+    case BuiltinOperator_REVERSE_V2:
+    case BuiltinOperator_UNSORTED_SEGMENT_MAX:
+    case BuiltinOperator_UNSORTED_SEGMENT_MIN:
+    case BuiltinOperator_UNSORTED_SEGMENT_PROD:
+    case BuiltinOperator_UNSORTED_SEGMENT_SUM:
+    case BuiltinOperator_ATAN2:
+    case BuiltinOperator_WHERE:
       return kTfLiteOk;
     case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
       return kTfLiteError;
@@ -2189,6 +2199,14 @@ TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
   return kTfLiteOk;
 }
 
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSquaredDifference(const Operator*, ErrorReporter*,
+                                    BuiltinDataAllocator*, void**) {
+  return kTfLiteOk;
+}
+
 TfLiteStatus ParseStridedSlice(const Operator* op,
                                ErrorReporter* error_reporter,
                                BuiltinDataAllocator* allocator,

+ 5 - 0
code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.h

@@ -356,6 +356,11 @@ TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter,
 TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator, void** builtin_data);
 
+TfLiteStatus ParseSquaredDifference(const Operator* op,
+                                    ErrorReporter* error_reporter,
+                                    BuiltinDataAllocator* allocator,
+                                    void** builtin_data);
+
 TfLiteStatus ParseStridedSlice(const Operator* op,
                                ErrorReporter* error_reporter,
                                BuiltinDataAllocator* allocator,

+ 52 - 1
code/components/tflite-lib/tensorflow/lite/core/api/op_resolver.h

@@ -23,6 +23,16 @@ limitations under the License.
 #include "tensorflow/lite/core/api/error_reporter.h"
 #include "tensorflow/lite/schema/schema_generated.h"
 
+// Opaque type similar to TfLiteDelegate / TfLiteOpaqueDelegate.
+// This is used for cases (e.g. when using "TF Lite with Google Play Services")
+// where the TF Lite runtime might be built using a newer (or older)
+// version of the TF Lite sources than the app, and hence might have a
+// different definition of the TfLiteDelegate type. TF Lite APIs use
+// TfLiteOpaqueDelegate rather than TfLiteDelegate when they want to
+// refer to a delegate defined with that potentially different version
+// of the TfLiteDelegate type.
+struct TfLiteOpaqueDelegateStruct;
+
 namespace tflite {
 
 /// Abstract interface that returns TfLiteRegistrations given op codes or custom
@@ -37,8 +47,10 @@ class OpResolver {
   virtual const TfLiteRegistration* FindOp(const char* op,
                                            int version) const = 0;
 
+  // Represents a sequence of delegates.
   using TfLiteDelegatePtrVector =
       std::vector<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>>;
+
   // Returns optional delegates for resolving and handling ops in the flatbuffer
   // model. This may be used in addition to the standard TfLiteRegistration
   // lookup for graph resolution.
@@ -47,16 +59,55 @@ class OpResolver {
     return {};
   }
 
-  // Represent a function that creates a TfLite delegate instance.
+  // Represents a function that creates a TfLite delegate instance.
   using TfLiteDelegateCreator =
       std::function<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>(
           int /*num_threads*/)>;
+
+  // Represents a sequence of delegate creator functions.
   using TfLiteDelegateCreators = std::vector<TfLiteDelegateCreator>;
+
   // Returns a vector of delegate creators to create optional delegates for
   // resolving and handling ops in the flatbuffer model. This may be used in
   // addition to the standard TfLiteRegistration lookup for graph resolution.
+  //
+  // Note that this method is not used (will not be called) if you are using
+  // TF Lite in Google Play Services; the GetOpaqueDelegateCreators method
+  // (see below) is used for that case.
   virtual TfLiteDelegateCreators GetDelegateCreators() const { return {}; }
 
+  // TODO(b/202712825): it would be nice if we could avoid the need for separate
+  // "opaque" types & methods for use only with TF Lite in Google Play Services.
+
+  // Represents an opaque delegate instance.
+  // WARNING: Experimental interface, subject to change.
+  using TfLiteOpaqueDelegatePtr =
+      std::unique_ptr<TfLiteOpaqueDelegateStruct,
+                      void (*)(TfLiteOpaqueDelegateStruct*)>;
+
+  // Represents a function that creates an opaque delegate instance.
+  // WARNING: Experimental interface, subject to change.
+  using TfLiteOpaqueDelegateCreator =
+      std::function<TfLiteOpaqueDelegatePtr(int /*num_threads*/)>;
+
+  // Represents a sequence of opaque delegate creator functions.
+  // WARNING: Experimental interface, subject to change.
+  using TfLiteOpaqueDelegateCreators = std::vector<TfLiteOpaqueDelegateCreator>;
+
+  // Returns a vector of opaque delegate creators to create optional opaque
+  // delegates for resolving and handling ops in the flatbuffer model. This may
+  // be used in addition to the standard TfLiteRegistration lookup for graph
+  // resolution.
+  //
+  // Note that this method will be called only if you are using TF Lite in
+  // Google Play Services; if you are using regular TF Lite, GetDelegateCreators
+  // (see above) is used instead.
+  //
+  // WARNING: Experimental interface, subject to change.
+  virtual TfLiteOpaqueDelegateCreators GetOpaqueDelegateCreators() const {
+    return {};
+  }
+
   virtual ~OpResolver() {}
 
  private:

+ 4 - 4
code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft.cc

@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
-#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h"
 
 #include <string.h>
 
+#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h"
 
 void FftCompute(struct FftState* state, const int16_t* input,
                 int input_scale_shift) {
@@ -37,9 +37,9 @@ void FftCompute(struct FftState* state, const int16_t* input,
 
   // Apply the FFT.
   kissfft_fixed16::kiss_fftr(
-    reinterpret_cast<kissfft_fixed16::kiss_fftr_cfg>(state->scratch),
-    state->input,
-    reinterpret_cast<kissfft_fixed16::kiss_fft_cpx*>(state->output));
+      reinterpret_cast<kissfft_fixed16::kiss_fftr_cfg>(state->scratch),
+      state->input,
+      reinterpret_cast<kissfft_fixed16::kiss_fft_cpx*>(state->output));
 }
 
 void FftInit(struct FftState* state) {

+ 2 - 1
code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc

@@ -13,10 +13,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 #include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
-#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h"
 
 #include <stdio.h>
 
+#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h"
+
 int FftPopulateState(struct FftState* state, size_t input_size) {
   state->input_size = input_size;
   state->fft_size = 1;

+ 0 - 1
code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h

@@ -31,4 +31,3 @@ namespace kissfft_fixed16 {
 #undef KISS_FFT_H
 
 #endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_INT16_H_
-

+ 1 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/common.h

@@ -15,6 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
 
+#include <algorithm>
 #ifndef ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
 #ifdef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
 #define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK

+ 10 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/compatibility.h

@@ -86,6 +86,16 @@ using int32 = std::int32_t;
 using uint32 = std::uint32_t;
 #endif  // !defined(TF_LITE_STATIC_MEMORY)
 
+// Allow for cross-compiler usage of function signatures - currently used for
+// specifying named RUY profiler regions in templated methods.
+#if defined(_MSC_VER)
+#define TFLITE_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define TFLITE_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#define TFLITE_PRETTY_FUNCTION __func__
+#endif
+
 // TFLITE_DEPRECATED()
 //
 // Duplicated from absl/base/macros.h to avoid pulling in that library.

+ 1 - 1
code/components/tflite-lib/tensorflow/lite/kernels/internal/portable_tensor_utils.h

@@ -324,7 +324,7 @@ void ApplySigmoidFloat(const int16_t* input, int32_t n_batch, int32_t n_input,
 //     - n_input: the size for input and output.
 //     - output:  the 16 bit output
 // The input is in Qm.15-m format and the output is in Q0.15 format.
-void ApplyTanh(int32_t integer_bits, const int16_t* input, int32_t n_batch,
+void ApplyTanh(int32_t intger_bits, const int16_t* input, int32_t n_batch,
                int32_t n_input, int16_t* output);
 
 // Apply Tanh to a quantized vector. Tbe internal calculation is in float.

+ 1 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/add.h

@@ -15,6 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
 
+#include <algorithm>
 #include <type_traits>
 
 #include "fixedpoint/fixedpoint.h"

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/concatenation.h

@@ -16,6 +16,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/cppmath.h"

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/conv.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 

+ 247 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/div.h

@@ -0,0 +1,247 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
+
+#include <algorithm>
+
+#include "tensorflow/lite/kernels/internal/common.h"
+
+namespace tflite {
+
+namespace reference_ops {
+
+template <typename T>
+inline void DivCheckArithmeticParams(const ArithmeticParams& params) {
+  TFLITE_DCHECK_LE(params.quantized_activation_min,
+                   params.quantized_activation_max);
+  // Input offset is negative input zero point. Activation tensors are
+  // asymmetric quantized so they span the full int8 range.
+  constexpr int32_t max_value =
+      static_cast<int32_t>(std::numeric_limits<T>::max());
+  TFLITE_DCHECK_GE(params.input1_offset, -max_value);
+  TFLITE_DCHECK_LE(params.input1_offset, max_value);
+  TFLITE_DCHECK_GE(params.input2_offset, -max_value);
+  TFLITE_DCHECK_LE(params.input2_offset, max_value);
+  TFLITE_DCHECK_GE(params.output_offset, -max_value);
+  TFLITE_DCHECK_LE(params.output_offset, max_value);
+}
+
+// Element-wise div that can often be used for inner loop of broadcast Div as
+// well as the non-broadcast Div.
+template <typename T>
+inline void DivElementwise(int size, const ArithmeticParams& params,
+                           const T* input1_data, const T* input2_data,
+                           T* output_data) {
+  DivCheckArithmeticParams<T>(params);
+
+  for (int i = 0; i < size; ++i) {
+    int32_t input1_val = params.input1_offset + input1_data[i];
+    int32_t input2_val = params.input2_offset + input2_data[i];
+    TFLITE_DCHECK_NE(input2_val, 0);
+    if (input2_val < 0) {
+      // Invert signs to avoid a negative input2_val as input2_inv needs to be
+      // positive to be used as multiplier of MultiplyByQuantizedMultiplier.
+      input1_val = -input1_val;
+      input2_val = -input2_val;
+    }
+    int recip_shift;
+    const int32_t input2_inv = GetReciprocal(input2_val, 31, &recip_shift);
+    const int headroom = CountLeadingSignBits(input1_val);
+    const int32_t unscaled_quotient =
+        MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv,
+                                                    headroom);
+    const int total_shift = params.output_shift - recip_shift - headroom;
+    const int32_t unclamped_result =
+        params.output_offset +
+        MultiplyByQuantizedMultiplierSmallerThanOneExp(
+            unscaled_quotient, params.output_multiplier, total_shift);
+    const int32_t clamped_output =
+        std::min(params.quantized_activation_max,
+                 std::max(params.quantized_activation_min, unclamped_result));
+    output_data[i] = static_cast<T>(clamped_output);
+  }
+}
+
+inline void Div(const ArithmeticParams& params,
+                const RuntimeShape& input1_shape, const uint8_t* input1_data,
+                const RuntimeShape& input2_shape, const uint8_t* input2_data,
+                const RuntimeShape& output_shape, uint8_t* output_data) {
+  TFLITE_DCHECK_LE(params.quantized_activation_min,
+                   params.quantized_activation_max);
+  const int flat_size =
+      MatchingElementsSize(input1_shape, input2_shape, output_shape);
+
+  DivElementwise(flat_size, params, input1_data, input2_data, output_data);
+}
+
+inline void Div(const ArithmeticParams& params,
+                const RuntimeShape& input1_shape, const int8_t* input1_data,
+                const RuntimeShape& input2_shape, const int8_t* input2_data,
+                const RuntimeShape& output_shape, int8_t* output_data) {
+  TFLITE_DCHECK_LE(params.quantized_activation_min,
+                   params.quantized_activation_max);
+  const int flat_size =
+      MatchingElementsSize(input1_shape, input2_shape, output_shape);
+
+  DivElementwise(flat_size, params, input1_data, input2_data, output_data);
+}
+
+template <typename T, int N = 5>
+inline void BroadcastDivSlowQuantized(
+    const ArithmeticParams& params, const RuntimeShape& unextended_input1_shape,
+    const T* input1_data, const RuntimeShape& unextended_input2_shape,
+    const T* input2_data, const RuntimeShape& unextended_output_shape,
+    T* output_data) {
+  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
+  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
+  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);
+
+  NdArrayDesc<N> desc1;
+  NdArrayDesc<N> desc2;
+  NdArrayDesc<N> output_desc;
+  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
+                                      unextended_input2_shape, &desc1, &desc2);
+  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
+                 &output_desc);
+
+  DivCheckArithmeticParams<T>(params);
+
+  auto div_func = [&](int indexes[N]) {
+    int32_t input1_val =
+        params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
+    int32_t input2_val =
+        params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
+    TFLITE_DCHECK_NE(input2_val, 0);
+    if (input2_val < 0) {
+      // Invert signs to avoid a negative input2_val as input2_inv needs to be
+      // positive to be used as multiplier of MultiplyByQuantizedMultiplier.
+      input1_val = -input1_val;
+      input2_val = -input2_val;
+    }
+    int recip_shift;
+    const int32_t input2_inv = GetReciprocal(input2_val, 31, &recip_shift);
+    const int headroom = CountLeadingSignBits(input1_val);
+    const int32_t unscaled_quotient =
+        MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv,
+                                                    headroom);
+    const int total_shift = params.output_shift - recip_shift - headroom;
+    const int32_t unclamped_result =
+        params.output_offset +
+        MultiplyByQuantizedMultiplierSmallerThanOneExp(
+            unscaled_quotient, params.output_multiplier, total_shift);
+    const int32_t clamped_output =
+        std::min(params.quantized_activation_max,
+                 std::max(params.quantized_activation_min, unclamped_result));
+    output_data[SubscriptToIndex(output_desc, indexes)] =
+        static_cast<T>(clamped_output);
+  };
+  NDOpsHelper<N>(output_desc, div_func);
+}
+
+template <int N = 5>
+inline void BroadcastDivSlow(const ArithmeticParams& params,
+                             const RuntimeShape& unextended_input1_shape,
+                             const uint8_t* input1_data,
+                             const RuntimeShape& unextended_input2_shape,
+                             const uint8_t* input2_data,
+                             const RuntimeShape& unextended_output_shape,
+                             uint8_t* output_data) {
+  BroadcastDivSlowQuantized<uint8_t, N>(
+      params, unextended_input1_shape, input1_data, unextended_input2_shape,
+      input2_data, unextended_output_shape, output_data);
+}
+
+template <int N = 5>
+inline void BroadcastDivSlow(const ArithmeticParams& params,
+                             const RuntimeShape& unextended_input1_shape,
+                             const int8_t* input1_data,
+                             const RuntimeShape& unextended_input2_shape,
+                             const int8_t* input2_data,
+                             const RuntimeShape& unextended_output_shape,
+                             int8_t* output_data) {
+  BroadcastDivSlowQuantized<int8_t, N>(
+      params, unextended_input1_shape, input1_data, unextended_input2_shape,
+      input2_data, unextended_output_shape, output_data);
+}
+
+// TODO(jiawen): We can implement BroadcastDiv on buffers of arbitrary
+// dimensionality if the runtime code does a single loop over one dimension
+// that handles broadcasting as the base case. The code generator would then
+// generate max(D1, D2) nested for loops.
+template <typename T, int N = 5>
+void BroadcastDivSlow(const ArithmeticParams& params,
+                      const RuntimeShape& unextended_input1_shape,
+                      const T* input1_data,
+                      const RuntimeShape& unextended_input2_shape,
+                      const T* input2_data,
+                      const RuntimeShape& unextended_output_shape,
+                      T* output_data) {
+  T output_activation_min;
+  T output_activation_max;
+  GetActivationParams(params, &output_activation_min, &output_activation_max);
+
+  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
+  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
+  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);
+
+  NdArrayDesc<N> desc1;
+  NdArrayDesc<N> desc2;
+  NdArrayDesc<N> output_desc;
+  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
+                                      unextended_input2_shape, &desc1, &desc2);
+  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
+                 &output_desc);
+
+  // In Tensorflow, the dimensions are canonically named (batch_number, row,
+  // col, channel), with extents (batches, height, width, depth), with the
+  // trailing dimension changing most rapidly (channels has the smallest
+  // stride, typically 1 element).
+  //
+  // In generated C code, we store arrays with the dimensions reversed. The
+  // first dimension has smallest stride.
+
+  auto div_func = [&](int indexes[N]) {
+    output_data[SubscriptToIndex(output_desc, indexes)] =
+        ActivationFunctionWithMinMax(
+            input1_data[SubscriptToIndex(desc1, indexes)] /
+                input2_data[SubscriptToIndex(desc2, indexes)],
+            output_activation_min, output_activation_max);
+  };
+  NDOpsHelper<N>(output_desc, div_func);
+}
+
+template <typename T>
+inline void Div(const ArithmeticParams& params,
+                const RuntimeShape& input1_shape, const T* input1_data,
+                const RuntimeShape& input2_shape, const T* input2_data,
+                const RuntimeShape& output_shape, T* output_data) {
+  T output_activation_min;
+  T output_activation_max;
+  GetActivationParams(params, &output_activation_min, &output_activation_max);
+
+  const int flat_size =
+      MatchingElementsSize(input1_shape, input2_shape, output_shape);
+  for (int i = 0; i < flat_size; ++i) {
+    output_data[i] = ActivationFunctionWithMinMax(
+        input1_data[i] / input2_data[i], output_activation_min,
+        output_activation_max);
+  }
+}
+
+}  // namespace reference_ops
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
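For orientation: per element, the quantized DivElementwise above effectively computes

    out = clamp(output_offset + output_multiplier * 2^output_shift * (in1 + input1_offset) / (in2 + input2_offset),
                quantized_activation_min, quantized_activation_max)

where the division is realised through a 31-bit fixed-point reciprocal of the sign-normalised divisor, and total_shift folds the reciprocal shift and the dividend's headroom back into the output rescaling. This is a paraphrase of the code above, not additional behaviour.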

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/fully_connected.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
 
+#include <algorithm>
+
 #include "ruy/profiler/instrumentation.h"  // from @ruy
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/cppmath.h"

+ 5 - 3
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/hard_swish.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_
 
+#include <algorithm>
+
 #include "ruy/profiler/instrumentation.h"  // from @ruy
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
@@ -23,9 +25,9 @@ namespace tflite {
 namespace reference_ops {
 
 inline int16_t SaturatingLeftShift(int16_t value, int amount) {
-  int32_t result = static_cast<int32_t>(value) * (1 << amount);
-  result = std::min<int32_t>(result, std::numeric_limits<int16_t>::max());
-  result = std::max<int32_t>(result, std::numeric_limits<int16_t>::min());
+  int64_t result = static_cast<int64_t>(value) * (1 << amount);
+  result = std::min<int64_t>(result, std::numeric_limits<int16_t>::max());
+  result = std::max<int64_t>(result, std::numeric_limits<int16_t>::min());
   return result;
 }
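The widening to int64_t matters for larger shift amounts: with value = 1024 and amount = 21 the product is 1024 * 2^21 = 2^31, which already exceeds INT32_MAX (signed overflow, i.e. undefined behaviour in the old int32_t version), whereas an int64_t holds it exactly before the result is clamped back into the int16 range (here to 32767). Note that the literal 1 in `(1 << amount)` is still a plain int, so amount itself must stay below 31. The numbers here are illustrative; the hard-swish kernel chooses the actual shift amounts.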
 

+ 1 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/add.h

@@ -15,6 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_
 
+#include <algorithm>
 #include <limits>
 
 #include "tensorflow/lite/kernels/internal/common.h"

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {

+ 90 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h

@@ -15,11 +15,101 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {
 namespace reference_integer_ops {
 
+// For per-channel functions, since it is defined in quantization spec that
+// weights are symmetric
+// (https://www.tensorflow.org/lite/performance/quantization_spec#symmetric_vs_asymmetric),
+// zero_point (params.weights_offset) is always 0.
+// However, for per-tensor functions, params.weights_offset is still applied for
+// backward compatibility.
+
+inline void FullyConnectedPerChannel(
+    const FullyConnectedParams& params, const int32_t* output_multiplier,
+    const int* output_shift, const RuntimeShape& input_shape,
+    const int8_t* input_data, const RuntimeShape& filter_shape,
+    const int8_t* filter_data, const RuntimeShape& bias_shape,
+    const int32_t* bias_data, const RuntimeShape& output_shape,
+    int8_t* output_data) {
+  const int32_t input_offset = params.input_offset;
+  const int32_t output_offset = params.output_offset;
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
+  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
+  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);
+
+  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+  const int filter_dim_count = filter_shape.DimensionsCount();
+  const int batches = output_shape.Dims(0);
+  const int output_depth = output_shape.Dims(1);
+  TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2));
+  const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
+  for (int b = 0; b < batches; ++b) {
+    for (int out_c = 0; out_c < output_depth; ++out_c) {
+      int32_t acc = 0;
+      for (int d = 0; d < accum_depth; ++d) {
+        int32_t input_val = input_data[b * accum_depth + d];
+        int32_t filter_val = filter_data[out_c * accum_depth + d];
+        acc += filter_val * (input_val + input_offset);
+      }
+      if (bias_data) {
+        acc += bias_data[out_c];
+      }
+      acc = MultiplyByQuantizedMultiplier(acc, output_multiplier[out_c],
+                                          output_shift[out_c]);
+      acc += output_offset;
+      acc = std::max(acc, output_activation_min);
+      acc = std::min(acc, output_activation_max);
+      output_data[out_c + output_depth * b] = static_cast<int8_t>(acc);
+    }
+  }
+}
+
+template <typename AccumScalar>
+inline void FullyConnectedPerChannel(
+    const FullyConnectedParams& params, const int32_t* output_multiplier,
+    const int* output_shift, const RuntimeShape& input_shape,
+    const int16_t* input_data, const RuntimeShape& filter_shape,
+    const int8_t* filter_data, const RuntimeShape& bias_shape,
+    const AccumScalar* bias_data, const RuntimeShape& output_shape,
+    int16_t* output_data) {
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
+  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
+  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
+
+  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+  const int filter_dim_count = filter_shape.DimensionsCount();
+  const int output_dim_count = output_shape.DimensionsCount();
+  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
+  const int output_depth = output_shape.Dims(output_dim_count - 1);
+  TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2));
+  const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
+  for (int b = 0; b < batches; ++b) {
+    for (int out_c = 0; out_c < output_depth; ++out_c) {
+      AccumScalar acc = 0;
+      for (int d = 0; d < accum_depth; ++d) {
+        int32_t input_val = input_data[b * accum_depth + d];
+        int32_t filter_val = filter_data[out_c * accum_depth + d];
+        acc += filter_val * input_val;
+      }
+      if (bias_data) {
+        acc += bias_data[out_c];
+      }
+      int32_t acc_scaled = MultiplyByQuantizedMultiplier(
+          acc, output_multiplier[out_c], output_shift[out_c]);
+      acc_scaled = std::max(acc_scaled, output_activation_min);
+      acc_scaled = std::min(acc_scaled, output_activation_max);
+      output_data[out_c + output_depth * b] = static_cast<int16_t>(acc_scaled);
+    }
+  }
+}
+
 inline void FullyConnected(
     const FullyConnectedParams& params, const RuntimeShape& input_shape,
     const int8_t* input_data, const RuntimeShape& filter_shape,

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h

@@ -15,7 +15,9 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_
 
+#include <algorithm>
 #include <limits>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_
 
+#include <algorithm>
+
 #include "fixedpoint/fixedpoint.h"
 #include "ruy/profiler/instrumentation.h"  // from @ruy
 #include "tensorflow/lite/kernels/internal/common.h"

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h

@@ -15,7 +15,9 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_
 
+#include <algorithm>
 #include <limits>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {

+ 1 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h

@@ -15,6 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
 
+#include <algorithm>
 #include <limits>
 
 #include "fixedpoint/fixedpoint.h"

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/mul.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 
 namespace tflite {

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/pooling.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/cppmath.h"
 #include "tensorflow/lite/kernels/internal/quantization_util.h"

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/prelu.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/types.h"

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/reduce.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
 
+#include <algorithm>
+
 #include "ruy/profiler/instrumentation.h"  // from @ruy
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/cppmath.h"

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/requantize.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_
 
+#include <algorithm>
+
 #include "ruy/profiler/instrumentation.h"  // from @ruy
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"

+ 1 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h

@@ -15,6 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_
 
+#include <algorithm>
 #include <cmath>
 
 #include "tensorflow/lite/kernels/internal/cppmath.h"

+ 1 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/softmax.h

@@ -15,6 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_
 
+#include <algorithm>
 #include <limits>
 
 #include "fixedpoint/fixedpoint.h"

+ 2 - 0
code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/transpose_conv.h

@@ -15,6 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_
 #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_
 
+#include <algorithm>
+
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 

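The run of <algorithm> includes added above covers reference kernels that call std::min/std::max (for example when clamping accumulators to the activation range); newer standard libraries no longer pull <algorithm> in transitively, which is presumably why each header now includes it explicitly. A minimal sketch of the pattern these kernels depend on (the helper name and values are illustrative, not taken from the diff):

    #include <algorithm>
    #include <cstdint>

    // Clamp a requantized accumulator to the int8 activation range,
    // as the reference kernels do before storing the output.
    inline int8_t ClampToInt8(int32_t acc, int32_t activation_min,
                              int32_t activation_max) {
      acc = std::max(acc, activation_min);  // needs <algorithm>
      acc = std::min(acc, activation_max);
      return static_cast<int8_t>(acc);
    }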
+ 6 - 3
code/components/tflite-lib/tensorflow/lite/kernels/internal/runtime_shape.h

@@ -27,6 +27,11 @@ class RuntimeShape {
  public:
   RuntimeShape& operator=(RuntimeShape const&) = delete;
 
+  // RuntimeShape in TFLM supports up to 5 dimensions.
+  // The name kMaxSmallSize comes from the same file in the upstream
+  // tensorflow lite repo and needs to be kept the same for maximum reuse.
+  static constexpr int kMaxSmallSize = 5;
+
   RuntimeShape() : size_(0) {}
 
   explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) {}
@@ -104,11 +109,9 @@ class RuntimeShape {
                 sizeof(int32_t) * shape.DimensionsCount());
   }
 
-  // A maximum of 4 dimensions are supported on TFLM.
-  static constexpr int kMaxSize = 5;
   int32_t size_;
   union {
-    int32_t dims_[kMaxSize];
+    int32_t dims_[kMaxSmallSize];
   };
 };
 

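With kMaxSmallSize raised to 5, TFLM shapes can describe five-dimensional tensors in place. A hedged usage sketch (the dimension values are made up; SetDim/FlatSize are the standard RuntimeShape accessors):

    #include "tensorflow/lite/kernels/internal/runtime_shape.h"

    // A 5-D shape now fits in dims_ without spilling past the array.
    tflite::RuntimeShape shape(5);
    const int32_t dims[5] = {1, 2, 16, 16, 3};
    for (int i = 0; i < 5; ++i) shape.SetDim(i, dims[i]);
    // shape.FlatSize() == 1 * 2 * 16 * 16 * 3 == 1536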
+ 5 - 5
code/components/tflite-lib/tensorflow/lite/kernels/internal/types.h

@@ -974,11 +974,11 @@ struct StridedSliceParams {
   int8_t strides_count;
   int32_t strides[5];
 
-  int16_t begin_mask;
-  int16_t ellipsis_mask;
-  int16_t end_mask;
-  int16_t new_axis_mask;
-  int16_t shrink_axis_mask;
+  uint16_t begin_mask;
+  uint16_t ellipsis_mask;
+  uint16_t end_mask;
+  uint16_t new_axis_mask;
+  uint16_t shrink_axis_mask;
 };
 
 struct TanhParams {

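The strided-slice masks hold one bit per axis, and switching them to unsigned storage presumably keeps bit operations on the upper bits well defined. A hedged sketch of how such masks are typically populated (the axis values are illustrative):

    #include "tensorflow/lite/kernels/internal/types.h"

    tflite::StridedSliceParams params = {};
    params.begin_mask |= (1u << 3);        // take axis 3 from its beginning
    params.shrink_axis_mask |= (1u << 0);  // drop axis 0 from the output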
+ 9 - 1
code/components/tflite-lib/tensorflow/lite/kernels/kernel_util.h

@@ -177,6 +177,14 @@ inline int64_t NumElements(const TfLiteTensor* t) {
   return NumElements(t->dims);
 }
 
+inline int64_t NumElements(const int* dims, int num_dims) {
+  int64_t count = 1;
+  for (int i = 0; i < num_dims; ++i) {
+    count *= dims[i];
+  }
+  return count;
+}
+
 // Determines whether tensor is constant.
 // TODO(b/138199592): Introduce new query which checks for constant OR
 // persistent-read-only, which would be useful for most tensor kernels that
@@ -308,7 +316,7 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                         const TfLiteTensor* input3,
                                         TfLiteIntArray** output_shape);
 
-// Return the size of given type in bytes. Return 0 in in case of string.
+// Return the size of given type in bytes. Return 0 in case of string.
 int TfLiteTypeGetSize(TfLiteType type);
 
 // Whether the current platform is mobile (Android or iOS).

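The new NumElements overload mirrors the TfLiteTensor variant but works directly on a raw dims array; a hedged usage sketch (the shape values are made up):

    #include "tensorflow/lite/kernels/kernel_util.h"

    const int dims[4] = {1, 28, 28, 3};
    const int64_t count = tflite::NumElements(dims, 4);  // 1 * 28 * 28 * 3 = 2352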
+ 2 - 0
code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.cc

@@ -43,6 +43,7 @@ AllOpsResolver::AllOpsResolver() {
   AddDepthwiseConv2D();
   AddDequantize();
   AddDetectionPostprocess();
+  AddDiv();
   AddElu();
   AddEqual();
   AddEthosU();
@@ -104,6 +105,7 @@ AllOpsResolver::AllOpsResolver() {
   AddSqueeze();
   AddStridedSlice();
   AddSub();
+  AddSum();
   AddSvdf();
   AddTanh();
   AddTranspose();

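AllOpsResolver is mainly used in tests; firmware normally registers only the operators its model needs. A hedged sketch of pulling in the newly registered DIV and SUM kernels through MicroMutableOpResolver (the operator list is illustrative):

    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

    tflite::MicroMutableOpResolver<4> resolver;
    resolver.AddConv2D();
    resolver.AddFullyConnected();
    resolver.AddDiv();  // newly available, matching AddDiv() above
    resolver.AddSum();  // newly available, matching AddSum() above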
+ 3 - 3
code/components/tflite-lib/tensorflow/lite/micro/ibuffer_allocator.h → code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h

@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#ifndef TENSORFLOW_LITE_MICRO_IBUFFER_ALLOCATOR_H_
-#define TENSORFLOW_LITE_MICRO_IBUFFER_ALLOCATOR_H_
+#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_
+#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_
 
 #include <cstddef>
 #include <cstdint>
@@ -97,4 +97,4 @@ class INonPersistentBufferAllocator {
 
 }  // namespace tflite
 
-#endif  // TENSORFLOW_LITE_MICRO_IBUFFER_ALLOCATOR_H_
+#endif  // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_

+ 170 - 0
code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.cc

@@ -0,0 +1,170 @@
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h"
+
+#include "tensorflow/lite/micro/memory_helpers.h"
+#include "tensorflow/lite/micro/micro_error_reporter.h"
+
+namespace tflite {
+
+NonPersistentArenaBufferAllocator::NonPersistentArenaBufferAllocator(
+    uint8_t* buffer, size_t buffer_size)
+    : buffer_head_(buffer),
+      buffer_tail_(buffer + buffer_size),
+      head_temp_(buffer),
+      next_temp_(buffer) {}
+
+NonPersistentArenaBufferAllocator::~NonPersistentArenaBufferAllocator() {}
+
+// Allocates a temporary buffer. This buffer is not resizable.
+uint8_t* NonPersistentArenaBufferAllocator::AllocateTemp(size_t size,
+                                                         size_t alignment) {
+  uint8_t* const aligned_result = AlignPointerUp(next_temp_, alignment);
+  const size_t available_memory = buffer_tail_ - aligned_result;
+  if (available_memory < size) {
+    MicroPrintf(
+        "Failed to allocate temp memory. Requested: %u, "
+        "available %u, missing: %u",
+        size, available_memory, size - available_memory);
+    return nullptr;
+  }
+  next_temp_ = aligned_result + size;
+  temp_buffer_ptr_check_sum_ ^= reinterpret_cast<intptr_t>(aligned_result);
+  temp_buffer_count_++;
+  return aligned_result;
+}
+
+// Signals that a temporary buffer is no longer needed.
+void NonPersistentArenaBufferAllocator::DeallocateTemp(uint8_t* temp_buf) {
+  temp_buffer_ptr_check_sum_ ^= reinterpret_cast<intptr_t>(temp_buf);
+  temp_buffer_count_--;
+}
+
+// Returns true if all temporary buffers are already deallocated.
+bool NonPersistentArenaBufferAllocator::IsAllTempDeallocated() {
+  if (temp_buffer_count_ != 0 || temp_buffer_ptr_check_sum_ != 0) {
+    MicroPrintf(
+        "Number of allocated temp buffers: %d. Checksum passing status: %d",
+        temp_buffer_count_, !temp_buffer_ptr_check_sum_);
+    return false;
+  }
+  return true;
+}
+
+// Signals that all temporary allocations can be reclaimed. TFLM calls this
+// API when it knows that all temporary buffers that it requested have been
+// deallocated. The goal of this API is to let implementations of
+// INonPersistentBufferAllocator reuse the buffer with some reasonable
+// complexity.
+TfLiteStatus NonPersistentArenaBufferAllocator::ResetTempAllocations() {
+  if (!IsAllTempDeallocated()) {
+    MicroPrintf(
+        "All temp buffers must be freed before calling ResetTempAllocations()");
+    return kTfLiteError;
+  }
+  next_temp_ = head_temp_;
+  return kTfLiteOk;
+}
+
+// Returns a buffer that is resizable via ResizeBuffer().
+uint8_t* NonPersistentArenaBufferAllocator::AllocateResizableBuffer(
+    size_t size, size_t alignment) {
+  // Only supports one resizable buffer, which starts at the buffer head.
+  uint8_t* expected_resizable_buf = AlignPointerUp(buffer_head_, alignment);
+
+  if (resizable_buffer_allocated_) {
+    MicroPrintf(
+        "Cannot allocate a new resizable buffer when one is already allocated");
+    return nullptr;
+  }
+
+  if (ResizeBuffer(expected_resizable_buf, size, alignment) == kTfLiteOk) {
+    resizable_buffer_allocated_ = true;
+    return expected_resizable_buf;
+  }
+  return nullptr;
+}
+
+// Resizes a buffer that was previously returned by AllocateResizableBuffer.
+// Note that ResizeBuffer(old_resizable_buf, 0, 1) effectively deallocates
+// a previous allocated resizable buffer.
+TfLiteStatus NonPersistentArenaBufferAllocator::ResizeBuffer(
+    uint8_t* resizable_buf, size_t size, size_t alignment) {
+  // Only supports one resizable buffer, which starts at the buffer head.
+  uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment);
+  if (resizable_buf != expect_resizable_buf) {
+    MicroPrintf("Internal error: buffer is not resizable");
+    return kTfLiteError;
+  }
+  if (head_temp_ != next_temp_) {
+    MicroPrintf("ResetTempAllocations() is not called before ResizeBuffer().");
+    return kTfLiteError;
+  }
+
+  const size_t available_memory = buffer_tail_ - expect_resizable_buf;
+  if (available_memory < size) {
+    MicroPrintf(
+        "Failed to resize buffer. Requested: %u, available %u, missing: %u",
+        size, available_memory, size - available_memory);
+    return kTfLiteError;
+  }
+  head_temp_ = expect_resizable_buf + size;
+  next_temp_ = head_temp_;
+
+  return kTfLiteOk;
+}
+
+// Frees up the memory occupied by the resizable buffer.
+TfLiteStatus NonPersistentArenaBufferAllocator::DeallocateResizableBuffer(
+    uint8_t* resizable_buf) {
+  TfLiteStatus status = ResizeBuffer(resizable_buf, 0, 1);
+  if (status == kTfLiteOk) {
+    resizable_buffer_allocated_ = false;
+  }
+  return status;
+}
+
+// Returns a pointer pointing to the start of the overlay memory, which is
+// used for activation tensors and scratch buffers by kernels at Invoke stage.
+uint8_t* NonPersistentArenaBufferAllocator::GetOverlayMemoryAddress() const {
+  return buffer_head_;
+}
+
+// Reserves the size of the overlay memory. This overlay is reserved for the
+// kernels at Invoke stage. This is referred to as the overlay because before
+// the Invoke stage, the same memory can be used for temp buffers. The layout of
+// the memory is planned by the memory planner separately at Invoke stage.
+TfLiteStatus
+NonPersistentArenaBufferAllocator::ReserveNonPersistentOverlayMemory(
+    size_t size, size_t alignment) {
+  uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment);
+  return ResizeBuffer(expect_resizable_buf, size, alignment);
+}
+
+// Returns the size of non-persistent buffer in use.
+size_t NonPersistentArenaBufferAllocator::GetNonPersistentUsedBytes() const {
+  return (next_temp_ - buffer_head_);
+}
+
+// Returns the number of bytes available with a given alignment. This number
+// takes into account any temporary allocations.
+size_t NonPersistentArenaBufferAllocator::GetAvailableMemory(
+    size_t alignment) const {
+  uint8_t* const aligned_temp = AlignPointerUp(next_temp_, alignment);
+  uint8_t* const aligned_tail = AlignPointerDown(buffer_tail_, alignment);
+  return aligned_tail - aligned_temp;
+}
+
+}  // namespace tflite

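The temp-buffer bookkeeping above relies on a running XOR checksum: AllocateTemp() XORs the returned pointer in and DeallocateTemp() XORs it back out, so the checksum and the counter both return to zero only when every allocation has a matching deallocation. A stand-alone sketch of the same idea (names are illustrative, not part of the diff):

    #include <cstdint>

    intptr_t check_sum = 0;
    int outstanding = 0;

    void OnAllocate(uint8_t* p) {
      check_sum ^= reinterpret_cast<intptr_t>(p);
      ++outstanding;
    }
    void OnDeallocate(uint8_t* p) {
      check_sum ^= reinterpret_cast<intptr_t>(p);
      --outstanding;
    }
    bool AllReturned() { return outstanding == 0 && check_sum == 0; }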
+ 105 - 0
code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h

@@ -0,0 +1,105 @@
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_
+#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h"
+#include "tensorflow/lite/micro/compatibility.h"
+
+namespace tflite {
+
+// Implement INonPersistentBufferAllocator on an arena that is dedicated for
+// non-persistent buffers.
+class NonPersistentArenaBufferAllocator : public INonPersistentBufferAllocator {
+ public:
+  NonPersistentArenaBufferAllocator(uint8_t* buffer, size_t buffer_size);
+  virtual ~NonPersistentArenaBufferAllocator();
+
+  // Allocates a temporary buffer. This buffer is not resizable.
+  uint8_t* AllocateTemp(size_t size, size_t alignment) override;
+
+  // Signals that a temporary buffer is no longer needed.
+  void DeallocateTemp(uint8_t* buf) override;
+
+  // Returns true if all temporary buffers are already deallocated.
+  bool IsAllTempDeallocated() override;
+
+  // Signals that all temporary allocations can be reclaimed. TFLM calls this
+  // API when it knows that all temporary buffers that it requested have been
+  // deallocated.
+  TfLiteStatus ResetTempAllocations() override;
+
+  // Returns a buffer that is resizable via ResizeBuffer().
+  uint8_t* AllocateResizableBuffer(size_t size, size_t alignment) override;
+
+  // Resizes a buffer that was previously returned by
+  // AllocateResizableBuffer.
+  TfLiteStatus ResizeBuffer(uint8_t* resizable_buf, size_t size,
+                            size_t alignment) override;
+
+  // Frees up the memory occupied by the resizable buffer.
+  TfLiteStatus DeallocateResizableBuffer(uint8_t* resizable_buf) override;
+
+  // Returns a pointer pointing to the start of the overlay memory, which is
+  // used for activation tensors and scratch buffers by kernels at Invoke stage.
+  uint8_t* GetOverlayMemoryAddress() const override;
+
+  // Reserves the size of the overlay memory. This overlay is reserved for the
+  // kernels at Invoke stage. This is referred to as the overlay because before
+  // the Invoke stage, the same memory can be used for temp buffers. The layout of
+  // the memory is planned by the memory planner separately at Invoke stage.
+  TfLiteStatus ReserveNonPersistentOverlayMemory(size_t size,
+                                                 size_t alignment) override;
+
+  // Returns the size of non-persistent buffer in use.
+  size_t GetNonPersistentUsedBytes() const override;
+
+  // Returns the number of bytes available with a given alignment. This number
+  // takes into account any temporary allocations.
+  size_t GetAvailableMemory(size_t alignment) const override;
+
+  TF_LITE_REMOVE_VIRTUAL_DELETE
+
+ private:
+  // The memory arena that this allocator manages.
+  uint8_t* const buffer_head_;
+  uint8_t* const buffer_tail_;
+
+  // The whole region is split into two parts:
+  // buffer_head_ to head_temp_ - 1 belongs to the only resizable buffer.
+  // head_temp_ to buffer_tail_ can be used for (non-resizable) temp buffers.
+  uint8_t* head_temp_;
+
+  // next_temp_ points to the next available temp buffer allocation address and
+  // its range is between head_temp_ and buffer_tail_
+  uint8_t* next_temp_;
+
+  // XOR checksum of outstanding temp buffer pointers.
+  // If all temp buffers are deallocated OR no temp buffers are allocated,
+  // temp_buffer_ptr_check_sum_ == 0.
+  intptr_t temp_buffer_ptr_check_sum_ = 0;
+  // Count of outstanding temp buffers.
+  int temp_buffer_count_ = 0;
+  bool resizable_buffer_allocated_ = false;
+};
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_

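A hedged usage sketch of the new non-persistent allocator; the arena size, alignment, and buffer sizes are made-up values:

    #include "tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h"

    alignas(16) static uint8_t arena[8 * 1024];
    tflite::NonPersistentArenaBufferAllocator allocator(arena, sizeof(arena));

    // Temp buffers live between the resizable head and the arena tail.
    uint8_t* scratch = allocator.AllocateTemp(/*size=*/256, /*alignment=*/16);
    // ... use scratch ...
    allocator.DeallocateTemp(scratch);
    allocator.ResetTempAllocations();

    // Reserve the overlay region used by kernels at Invoke time.
    allocator.ReserveNonPersistentOverlayMemory(/*size=*/4 * 1024,
                                                /*alignment=*/16);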
+ 52 - 0
code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.cc

@@ -0,0 +1,52 @@
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h"
+
+#include "tensorflow/lite/micro/memory_helpers.h"
+#include "tensorflow/lite/micro/micro_error_reporter.h"
+
+namespace tflite {
+
+PersistentArenaBufferAllocator::PersistentArenaBufferAllocator(
+    uint8_t* buffer, size_t buffer_size)
+    : buffer_head_(buffer),
+      buffer_tail_(buffer + buffer_size),
+      tail_temp_(buffer_tail_) {}
+
+PersistentArenaBufferAllocator::~PersistentArenaBufferAllocator() {}
+
+uint8_t* PersistentArenaBufferAllocator::AllocatePersistentBuffer(
+    size_t size, size_t alignment) {
+  uint8_t* const aligned_result =
+      AlignPointerDown(tail_temp_ - size, alignment);
+  if (aligned_result < buffer_head_) {
+#ifndef TF_LITE_STRIP_ERROR_STRINGS
+    const size_t missing_memory = buffer_head_ - aligned_result;
+    MicroPrintf(
+        "Failed to allocate tail memory. Requested: %u, "
+        "available %u, missing: %u",
+        size, size - missing_memory, missing_memory);
+#endif
+    return nullptr;
+  }
+  tail_temp_ = aligned_result;
+  return aligned_result;
+}
+
+size_t PersistentArenaBufferAllocator::GetPersistentUsedBytes() const {
+  return buffer_tail_ - tail_temp_;
+}
+
+}  // namespace tflite

+ 59 - 0
code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h

@@ -0,0 +1,59 @@
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_
+#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h"
+#include "tensorflow/lite/micro/compatibility.h"
+
+namespace tflite {
+
+// PersistentArenaBufferAllocator is an implementation of the
+// IPersistentBufferAllocator interface on an arena that is dedicated for
+// persistent buffers.
+class PersistentArenaBufferAllocator : public IPersistentBufferAllocator {
+ public:
+  PersistentArenaBufferAllocator(uint8_t* buffer, size_t buffer_size);
+  virtual ~PersistentArenaBufferAllocator();
+
+  // Allocates persistent memory. The persistent buffer is never freed.
+  // Returns nullptr if errors occurred.
+  uint8_t* AllocatePersistentBuffer(size_t size, size_t alignment) override;
+
+  // Returns the size of all persistent allocations in bytes.
+  size_t GetPersistentUsedBytes() const override;
+
+  TF_LITE_REMOVE_VIRTUAL_DELETE
+ private:
+  // The memory arena that this allocator manages.
+  uint8_t* const buffer_head_;
+  uint8_t* const buffer_tail_;
+
+  // The whole region is split into two parts:
+  // tail_temp_ to buffer_tail_ contains allocated buffers;
+  // buffer_head_ to tail_temp_ - 1 belongs to still available spaces.
+  // So in essence, the allocated region grows from the bottom and emulates
+  // SingleArenaBufferAllocator's persistent part.
+  uint8_t* tail_temp_;
+};
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_

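A hedged usage sketch of the persistent counterpart; the arena and buffer sizes are illustrative:

    #include "tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h"

    alignas(16) static uint8_t persistent_arena[4 * 1024];
    tflite::PersistentArenaBufferAllocator allocator(persistent_arena,
                                                     sizeof(persistent_arena));

    // Allocations grow down from the tail and are never freed.
    uint8_t* buf = allocator.AllocatePersistentBuffer(/*size=*/512,
                                                      /*alignment=*/16);
    size_t used = allocator.GetPersistentUsedBytes();  // >= 512 after padding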
+ 21 - 19
code/components/tflite-lib/tensorflow/lite/micro/recording_simple_memory_allocator.cc → code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.cc

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/recording_simple_memory_allocator.h"
+#include "tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h"
 
 #include <new>
 
@@ -21,47 +21,49 @@ limitations under the License.
 
 namespace tflite {
 
-RecordingSimpleMemoryAllocator::RecordingSimpleMemoryAllocator(
+RecordingSingleArenaBufferAllocator::RecordingSingleArenaBufferAllocator(
     ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size)
-    : SimpleMemoryAllocator(error_reporter, buffer_head, buffer_size),
+    : SingleArenaBufferAllocator(error_reporter, buffer_head, buffer_size),
       requested_head_bytes_(0),
       requested_tail_bytes_(0),
       used_bytes_(0),
       alloc_count_(0) {}
 
-RecordingSimpleMemoryAllocator::~RecordingSimpleMemoryAllocator() {}
+RecordingSingleArenaBufferAllocator::~RecordingSingleArenaBufferAllocator() {}
 
-RecordingSimpleMemoryAllocator* RecordingSimpleMemoryAllocator::Create(
-    ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) {
+RecordingSingleArenaBufferAllocator*
+RecordingSingleArenaBufferAllocator::Create(ErrorReporter* error_reporter,
+                                            uint8_t* buffer_head,
+                                            size_t buffer_size) {
   TFLITE_DCHECK(error_reporter != nullptr);
   TFLITE_DCHECK(buffer_head != nullptr);
-  RecordingSimpleMemoryAllocator tmp =
-      RecordingSimpleMemoryAllocator(error_reporter, buffer_head, buffer_size);
+  RecordingSingleArenaBufferAllocator tmp = RecordingSingleArenaBufferAllocator(
+      error_reporter, buffer_head, buffer_size);
 
-  uint8_t* allocator_buffer =
-      tmp.AllocatePersistentBuffer(sizeof(RecordingSimpleMemoryAllocator),
-                                   alignof(RecordingSimpleMemoryAllocator));
+  uint8_t* allocator_buffer = tmp.AllocatePersistentBuffer(
+      sizeof(RecordingSingleArenaBufferAllocator),
+      alignof(RecordingSingleArenaBufferAllocator));
   // Use the default copy constructor to populate internal states.
-  return new (allocator_buffer) RecordingSimpleMemoryAllocator(tmp);
+  return new (allocator_buffer) RecordingSingleArenaBufferAllocator(tmp);
 }
 
-size_t RecordingSimpleMemoryAllocator::GetRequestedBytes() const {
+size_t RecordingSingleArenaBufferAllocator::GetRequestedBytes() const {
   return requested_head_bytes_ + requested_tail_bytes_;
 }
 
-size_t RecordingSimpleMemoryAllocator::GetUsedBytes() const {
+size_t RecordingSingleArenaBufferAllocator::GetUsedBytes() const {
   return used_bytes_;
 }
 
-size_t RecordingSimpleMemoryAllocator::GetAllocatedCount() const {
+size_t RecordingSingleArenaBufferAllocator::GetAllocatedCount() const {
   return alloc_count_;
 }
 
-TfLiteStatus RecordingSimpleMemoryAllocator::ResizeBuffer(
+TfLiteStatus RecordingSingleArenaBufferAllocator::ResizeBuffer(
     uint8_t* resizable_buf, size_t size, size_t alignment) {
   const uint8_t* previous_head = head();
   TfLiteStatus status =
-      SimpleMemoryAllocator::ResizeBuffer(resizable_buf, size, alignment);
+      SingleArenaBufferAllocator::ResizeBuffer(resizable_buf, size, alignment);
   if (status == kTfLiteOk) {
     used_bytes_ += head() - previous_head;
     requested_head_bytes_ = size;
@@ -69,11 +71,11 @@ TfLiteStatus RecordingSimpleMemoryAllocator::ResizeBuffer(
   return status;
 }
 
-uint8_t* RecordingSimpleMemoryAllocator::AllocatePersistentBuffer(
+uint8_t* RecordingSingleArenaBufferAllocator::AllocatePersistentBuffer(
     size_t size, size_t alignment) {
   const uint8_t* previous_tail = tail();
   uint8_t* result =
-      SimpleMemoryAllocator::AllocatePersistentBuffer(size, alignment);
+      SingleArenaBufferAllocator::AllocatePersistentBuffer(size, alignment);
   if (result != nullptr) {
     used_bytes_ += previous_tail - tail();
     requested_tail_bytes_ += size;

+ 13 - 14
code/components/tflite-lib/tensorflow/lite/micro/recording_simple_memory_allocator.h → code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h

@@ -13,28 +13,27 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#ifndef TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_
-#define TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_
+#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_
+#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_
 
+#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
 #include "tensorflow/lite/micro/compatibility.h"
-#include "tensorflow/lite/micro/simple_memory_allocator.h"
 
 namespace tflite {
 
-// Utility class used to log allocations of a SimpleMemoryAllocator. Should only
-// be used in debug/evaluation settings or unit tests to evaluate allocation
-// usage.
-class RecordingSimpleMemoryAllocator : public SimpleMemoryAllocator {
+// Utility class used to log allocations of a SingleArenaBufferAllocator. Should
+// only be used in debug/evaluation settings or unit tests to evaluate
+// allocation usage.
+class RecordingSingleArenaBufferAllocator : public SingleArenaBufferAllocator {
  public:
-  RecordingSimpleMemoryAllocator(ErrorReporter* error_reporter,
-                                 uint8_t* buffer_head, size_t buffer_size);
+  RecordingSingleArenaBufferAllocator(ErrorReporter* error_reporter,
+                                      uint8_t* buffer_head, size_t buffer_size);
   // TODO(b/157615197): Cleanup constructors/destructor and use factory
   // functions.
-  ~RecordingSimpleMemoryAllocator() override;
+  ~RecordingSingleArenaBufferAllocator() override;
 
-  static RecordingSimpleMemoryAllocator* Create(ErrorReporter* error_reporter,
-                                                uint8_t* buffer_head,
-                                                size_t buffer_size);
+  static RecordingSingleArenaBufferAllocator* Create(
+      ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size);
 
   // Returns the number of bytes requested from the head or tail.
   size_t GetRequestedBytes() const;
@@ -62,4 +61,4 @@ class RecordingSimpleMemoryAllocator : public SimpleMemoryAllocator {
 
 }  // namespace tflite
 
-#endif  // TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_
+#endif  // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_

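A hedged sketch of using the renamed recording allocator to measure arena usage in a debug build (the arena size is illustrative):

    #include "tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h"
    #include "tensorflow/lite/micro/micro_error_reporter.h"

    static uint8_t arena[16 * 1024];
    tflite::MicroErrorReporter error_reporter;
    tflite::RecordingSingleArenaBufferAllocator* allocator =
        tflite::RecordingSingleArenaBufferAllocator::Create(
            &error_reporter, arena, sizeof(arena));
    // ... run allocations through the interpreter setup ...
    size_t requested = allocator->GetRequestedBytes();
    size_t used = allocator->GetUsedBytes();  // includes alignment padding
    size_t count = allocator->GetAllocatedCount();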
+ 44 - 40
code/components/tflite-lib/tensorflow/lite/micro/simple_memory_allocator.cc → code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.cc

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/lite/micro/simple_memory_allocator.h"
+#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
 
 #include <cstddef>
 #include <cstdint>
@@ -29,42 +29,45 @@ limitations under the License.
 
 namespace tflite {
 
-SimpleMemoryAllocator::SimpleMemoryAllocator(ErrorReporter* error_reporter,
-                                             uint8_t* buffer_head,
-                                             uint8_t* buffer_tail)
-    : error_reporter_(error_reporter),
+SingleArenaBufferAllocator::SingleArenaBufferAllocator(
+    ErrorReporter* error_reporter, uint8_t* buffer_head, uint8_t* buffer_tail)
+    :
+#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
+      error_reporter_(error_reporter),
+#endif
       buffer_head_(buffer_head),
       buffer_tail_(buffer_tail),
       head_(buffer_head),
       tail_(buffer_tail),
-      temp_(buffer_head_) {}
+      temp_(buffer_head_) {
+}
 
-SimpleMemoryAllocator::SimpleMemoryAllocator(ErrorReporter* error_reporter,
-                                             uint8_t* buffer,
-                                             size_t buffer_size)
-    : SimpleMemoryAllocator(error_reporter, buffer, buffer + buffer_size) {}
+SingleArenaBufferAllocator::SingleArenaBufferAllocator(
+    ErrorReporter* error_reporter, uint8_t* buffer, size_t buffer_size)
+    : SingleArenaBufferAllocator(error_reporter, buffer, buffer + buffer_size) {
+}
 
 /* static */
-SimpleMemoryAllocator* SimpleMemoryAllocator::Create(
+SingleArenaBufferAllocator* SingleArenaBufferAllocator::Create(
     ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) {
   TFLITE_DCHECK(error_reporter != nullptr);
   TFLITE_DCHECK(buffer_head != nullptr);
-  SimpleMemoryAllocator tmp =
-      SimpleMemoryAllocator(error_reporter, buffer_head, buffer_size);
+  SingleArenaBufferAllocator tmp =
+      SingleArenaBufferAllocator(error_reporter, buffer_head, buffer_size);
 
-  // Allocate enough bytes from the buffer to create a SimpleMemoryAllocator.
-  // The new instance will use the current adjusted tail buffer from the tmp
-  // allocator instance.
+  // Allocate enough bytes from the buffer to create a
+  // SingleArenaBufferAllocator. The new instance will use the current adjusted
+  // tail buffer from the tmp allocator instance.
   uint8_t* allocator_buffer = tmp.AllocatePersistentBuffer(
-      sizeof(SimpleMemoryAllocator), alignof(SimpleMemoryAllocator));
+      sizeof(SingleArenaBufferAllocator), alignof(SingleArenaBufferAllocator));
   // Use the default copy constructor to populate internal states.
-  return new (allocator_buffer) SimpleMemoryAllocator(tmp);
+  return new (allocator_buffer) SingleArenaBufferAllocator(tmp);
 }
 
-SimpleMemoryAllocator::~SimpleMemoryAllocator() {}
+SingleArenaBufferAllocator::~SingleArenaBufferAllocator() {}
 
-uint8_t* SimpleMemoryAllocator::AllocateResizableBuffer(size_t size,
-                                                        size_t alignment) {
+uint8_t* SingleArenaBufferAllocator::AllocateResizableBuffer(size_t size,
+                                                             size_t alignment) {
   // Only supports one resizable buffer, which starts at the buffer head.
   uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment);
   if (ResizeBuffer(expect_resizable_buf, size, alignment) == kTfLiteOk) {
@@ -73,20 +76,20 @@ uint8_t* SimpleMemoryAllocator::AllocateResizableBuffer(size_t size,
   return nullptr;
 }
 
-TfLiteStatus SimpleMemoryAllocator::DeallocateResizableBuffer(
+TfLiteStatus SingleArenaBufferAllocator::DeallocateResizableBuffer(
     uint8_t* resizable_buf) {
   return ResizeBuffer(resizable_buf, 0, 1);
 }
 
-TfLiteStatus SimpleMemoryAllocator::ReserveNonPersistentOverlayMemory(
+TfLiteStatus SingleArenaBufferAllocator::ReserveNonPersistentOverlayMemory(
     size_t size, size_t alignment) {
   uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment);
   return ResizeBuffer(expect_resizable_buf, size, alignment);
 }
 
-TfLiteStatus SimpleMemoryAllocator::ResizeBuffer(uint8_t* resizable_buf,
-                                                 size_t size,
-                                                 size_t alignment) {
+TfLiteStatus SingleArenaBufferAllocator::ResizeBuffer(uint8_t* resizable_buf,
+                                                      size_t size,
+                                                      size_t alignment) {
   // Only supports one resizable buffer, which starts at the buffer head.
   uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment);
   if (head_ != temp_ || resizable_buf != expect_resizable_buf) {
@@ -112,8 +115,8 @@ TfLiteStatus SimpleMemoryAllocator::ResizeBuffer(uint8_t* resizable_buf,
   return kTfLiteOk;
 }
 
-uint8_t* SimpleMemoryAllocator::AllocatePersistentBuffer(size_t size,
-                                                         size_t alignment) {
+uint8_t* SingleArenaBufferAllocator::AllocatePersistentBuffer(
+    size_t size, size_t alignment) {
   uint8_t* const aligned_result = AlignPointerDown(tail_ - size, alignment);
   if (aligned_result < head_) {
 #ifndef TF_LITE_STRIP_ERROR_STRINGS
@@ -129,7 +132,8 @@ uint8_t* SimpleMemoryAllocator::AllocatePersistentBuffer(size_t size,
   return aligned_result;
 }
 
-uint8_t* SimpleMemoryAllocator::AllocateTemp(size_t size, size_t alignment) {
+uint8_t* SingleArenaBufferAllocator::AllocateTemp(size_t size,
+                                                  size_t alignment) {
   uint8_t* const aligned_result = AlignPointerUp(temp_, alignment);
   const size_t available_memory = tail_ - aligned_result;
   if (available_memory < size) {
@@ -145,12 +149,12 @@ uint8_t* SimpleMemoryAllocator::AllocateTemp(size_t size, size_t alignment) {
   return aligned_result;
 }
 
-void SimpleMemoryAllocator::DeallocateTemp(uint8_t* temp_buf) {
+void SingleArenaBufferAllocator::DeallocateTemp(uint8_t* temp_buf) {
   temp_buffer_ptr_check_sum_ ^= (reinterpret_cast<intptr_t>(temp_buf));
   temp_buffer_count_--;
 }
 
-bool SimpleMemoryAllocator::IsAllTempDeallocated() {
+bool SingleArenaBufferAllocator::IsAllTempDeallocated() {
   if (temp_buffer_count_ != 0 || temp_buffer_ptr_check_sum_ != 0) {
     MicroPrintf(
         "Number of allocated temp buffers: %d. Checksum passing status: %d",
@@ -160,7 +164,7 @@ bool SimpleMemoryAllocator::IsAllTempDeallocated() {
   return true;
 }
 
-TfLiteStatus SimpleMemoryAllocator::ResetTempAllocations() {
+TfLiteStatus SingleArenaBufferAllocator::ResetTempAllocations() {
   // TODO(b/209453859): enable error check based on IsAllTempDeallocated after
   // all AllocateTemp have been paired with DeallocateTemp
   if (!IsAllTempDeallocated()) {
@@ -172,34 +176,34 @@ TfLiteStatus SimpleMemoryAllocator::ResetTempAllocations() {
   return kTfLiteOk;
 }
 
-uint8_t* SimpleMemoryAllocator::GetOverlayMemoryAddress() const {
+uint8_t* SingleArenaBufferAllocator::GetOverlayMemoryAddress() const {
   return buffer_head_;
 }
 
-size_t SimpleMemoryAllocator::GetNonPersistentUsedBytes() const {
+size_t SingleArenaBufferAllocator::GetNonPersistentUsedBytes() const {
   return std::max(head_ - buffer_head_, temp_ - buffer_head_);
 }
 
-size_t SimpleMemoryAllocator::GetPersistentUsedBytes() const {
+size_t SingleArenaBufferAllocator::GetPersistentUsedBytes() const {
   return buffer_tail_ - tail_;
 }
 
-size_t SimpleMemoryAllocator::GetAvailableMemory(size_t alignment) const {
+size_t SingleArenaBufferAllocator::GetAvailableMemory(size_t alignment) const {
   uint8_t* const aligned_temp = AlignPointerUp(temp_, alignment);
   uint8_t* const aligned_tail = AlignPointerDown(tail_, alignment);
   return aligned_tail - aligned_temp;
 }
 
-size_t SimpleMemoryAllocator::GetUsedBytes() const {
+size_t SingleArenaBufferAllocator::GetUsedBytes() const {
   return GetPersistentUsedBytes() + GetNonPersistentUsedBytes();
 }
 
-size_t SimpleMemoryAllocator::GetBufferSize() const {
+size_t SingleArenaBufferAllocator::GetBufferSize() const {
   return buffer_tail_ - buffer_head_;
 }
 
-uint8_t* SimpleMemoryAllocator::head() const { return head_; }
+uint8_t* SingleArenaBufferAllocator::head() const { return head_; }
 
-uint8_t* SimpleMemoryAllocator::tail() const { return tail_; }
+uint8_t* SingleArenaBufferAllocator::tail() const { return tail_; }
 
 }  // namespace tflite

+ 17 - 15
code/components/tflite-lib/tensorflow/lite/micro/simple_memory_allocator.h → code/components/tflite-lib/tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h

@@ -13,37 +13,37 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#ifndef TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_
-#define TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_
+#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_
+#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_
 
 #include <cstddef>
 #include <cstdint>
 
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/micro/arena_allocator/ibuffer_allocator.h"
 #include "tensorflow/lite/micro/compatibility.h"
-#include "tensorflow/lite/micro/ibuffer_allocator.h"
 
 namespace tflite {
 
 // TODO(petewarden): This allocator never frees up or reuses  any memory, even
 // though we have enough information about lifetimes of the tensors to do so.
 // This makes it pretty wasteful, so we should use a more intelligent method.
-class SimpleMemoryAllocator : public INonPersistentBufferAllocator,
-                              public IPersistentBufferAllocator {
+class SingleArenaBufferAllocator : public INonPersistentBufferAllocator,
+                                   public IPersistentBufferAllocator {
  public:
   // TODO(b/157615197): Cleanup constructors/destructor and use factory
   // functions.
-  SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer_head,
-                        uint8_t* buffer_tail);
-  SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer,
-                        size_t buffer_size);
-  virtual ~SimpleMemoryAllocator();
+  SingleArenaBufferAllocator(ErrorReporter* error_reporter,
+                             uint8_t* buffer_head, uint8_t* buffer_tail);
+  SingleArenaBufferAllocator(ErrorReporter* error_reporter, uint8_t* buffer,
+                             size_t buffer_size);
+  virtual ~SingleArenaBufferAllocator();
 
-  // Creates a new SimpleMemoryAllocator from a given buffer head and size.
-  static SimpleMemoryAllocator* Create(ErrorReporter* error_reporter,
-                                       uint8_t* buffer_head,
-                                       size_t buffer_size);
+  // Creates a new SingleArenaBufferAllocator from a given buffer head and size.
+  static SingleArenaBufferAllocator* Create(ErrorReporter* error_reporter,
+                                            uint8_t* buffer_head,
+                                            size_t buffer_size);
 
   // Resizes a buffer that is previously returned by the
   // AllocateResizableBuffer. In current implementation, it Adjusts the head
@@ -126,7 +126,9 @@ class SimpleMemoryAllocator : public INonPersistentBufferAllocator,
  private:
   size_t GetBufferSize() const;
 
+#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
   ErrorReporter* error_reporter_;
+#endif
   uint8_t* buffer_head_;
   uint8_t* buffer_tail_;
   uint8_t* head_;
@@ -147,4 +149,4 @@ class SimpleMemoryAllocator : public INonPersistentBufferAllocator,
 
 }  // namespace tflite
 
-#endif  // TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_
+#endif  // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_

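Callers elsewhere in the tree only need the include path and class name swapped after this rename; a hedged before/after sketch (arena, arena_size, and error_reporter are assumed to be defined as before):

    // Before:
    //   #include "tensorflow/lite/micro/simple_memory_allocator.h"
    //   tflite::SimpleMemoryAllocator* allocator =
    //       tflite::SimpleMemoryAllocator::Create(&error_reporter, arena, arena_size);

    // After:
    #include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
    tflite::SingleArenaBufferAllocator* allocator =
        tflite::SingleArenaBufferAllocator::Create(&error_reporter, arena, arena_size);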
+ 6 - 6
code/components/tflite-lib/tensorflow/lite/micro/fake_micro_context.cc

@@ -16,10 +16,10 @@ limitations under the License.
 #include "tensorflow/lite/micro/fake_micro_context.h"
 
 #include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
 #include "tensorflow/lite/micro/micro_allocator.h"
 #include "tensorflow/lite/micro/micro_arena_constants.h"
 #include "tensorflow/lite/micro/micro_error_reporter.h"
-#include "tensorflow/lite/micro/simple_memory_allocator.h"
 
 namespace tflite {
 namespace {
@@ -30,7 +30,7 @@ static uint8_t dummy_tensor_arena[KDummyTensorArenaSize];
 }  // namespace
 
 FakeMicroContext::FakeMicroContext(TfLiteTensor* tensors,
-                                   SimpleMemoryAllocator* allocator,
+                                   SingleArenaBufferAllocator* allocator,
                                    MicroGraph* micro_graph)
     : MicroContext(
           MicroAllocator::Create(dummy_tensor_arena, KDummyTensorArenaSize,
@@ -67,10 +67,10 @@ TfLiteEvalTensor* FakeMicroContext::GetEvalTensor(int tensor_index) {
 }
 
 void* FakeMicroContext::AllocatePersistentBuffer(size_t bytes) {
-  // FakeMicroContext use SimpleMemoryAllocator, which does not automatically
-  // apply the buffer alignment like MicroAllocator.
-  // The buffer alignment is potentially wasteful but allows the
-  // fake_micro_context to work correctly with optimized kernels.
+  // FakeMicroContext uses SingleArenaBufferAllocator, which does not
+  // automatically apply the buffer alignment like MicroAllocator. The buffer
+  // alignment is potentially wasteful but allows the fake_micro_context to work
+  // correctly with optimized kernels.
   return allocator_->AllocatePersistentBuffer(bytes,
                                               MicroArenaBufferAlignment());
 }

Some files were not shown because too many files changed in this diff