/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_
#define TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_

#include <cstdint>
#include <initializer_list>
#include <limits>

#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
namespace testing {

constexpr int kOfflinePlannerHeaderSize = 3;

struct NodeConnection_ {
  std::initializer_list<int32_t> input;
  std::initializer_list<int32_t> output;
};
typedef struct NodeConnection_ NodeConnection;

// A simple operator that returns the median of the input along with the
// number of times the kernel has been invoked. The implementation is
// deliberately complicated, just to demonstrate how kernel memory planning
// works.
class SimpleStatefulOp {
  static constexpr int kBufferNotAllocated = 0;
  // Inputs:
  static constexpr int kInputTensor = 0;
  // Outputs:
  static constexpr int kMedianTensor = 0;
  static constexpr int kInvokeCount = 1;
  struct OpData {
    int* invoke_count = nullptr;
    int sorting_buffer = kBufferNotAllocated;
  };

 public:
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);
};

class MockCustom {
 public:
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static void Free(TfLiteContext* context, void* buffer);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);

  static bool freed_;
};

// A simple operator with the purpose of testing multiple inputs. It returns
// the sum of the inputs.
class MultipleInputs {
 public:
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static void Free(TfLiteContext* context, void* buffer);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);

  static bool freed_;
};

// A simple no-op operator.
class NoOp {
 public:
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static void Free(TfLiteContext* context, void* buffer);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);

  static bool freed_;
};

// Returns an Op Resolver that can be used in the testing code.
AllOpsResolver GetOpResolver();

// Returns a simple example flatbuffer TensorFlow Lite model. Contains 1 input,
// 1 layer of weights, 1 output Tensor, and 1 operator.
const Model* GetSimpleMockModel();

// Returns a flatbuffer TensorFlow Lite model with more inputs, variable
// tensors, and operators.
const Model* GetComplexMockModel();

// Returns a simple example flatbuffer TensorFlow Lite model. Contains 1 input,
// 1 layer of weights, 1 output Tensor, and 1 operator.
// The size of all three tensors is 256 x 256, which is larger than what other
// models provide from this test helper.
const Model* GetModelWith256x256Tensor();

// Returns a simple flatbuffer model with two branches.
const Model* GetSimpleModelWithBranch();

// Returns a simple example flatbuffer TensorFlow Lite model. Contains 3 inputs,
// 1 output Tensor, and 1 operator.
const Model* GetSimpleMultipleInputsModel();

// Returns a simple flatbuffer model with offline planned tensors.
// @param[in] num_tensors Number of tensors in the model.
// @param[in] metadata_buffer Metadata for the offline planner.
// @param[in] node_conn List of connections, i.e. operators, in the model.
// @param[in] num_conns Number of connections.
// @param[in] num_subgraph_inputs How many of the input tensors are in the
//                                subgraph inputs. The default value of 0
//                                means all of the input tensors are in the
//                                subgraph input list. There must be at least
//                                1 input tensor in the subgraph input list.
const Model* GetModelWithOfflinePlanning(int num_tensors,
                                         const int32_t* metadata_buffer,
                                         NodeConnection* node_conn,
                                         int num_conns,
                                         int num_subgraph_inputs = 0);
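//
// Example (illustrative sketch only): two connected operators over four
// tensors. `metadata_buffer` is assumed to hold offline-planner metadata laid
// out the way the memory planner expects; see the test sources for real
// values.
//   NodeConnection node_list[] = {
//       {{0, 1}, {2}},  // op 0 reads tensors 0 and 1, writes tensor 2.
//       {{2}, {3}},     // op 1 reads tensor 2, writes tensor 3.
//   };
//   const Model* model = GetModelWithOfflinePlanning(
//       /*num_tensors=*/4, metadata_buffer, node_list, /*num_conns=*/2);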

// Returns a flatbuffer with a single operator, two inputs (one unused) and one
// output.
const Model* GetModelWithUnusedInputs();

// Returns a flatbuffer with a single operator, zero inputs and two outputs
// (one unused).
const Model* GetModelWithUnusedOperatorOutputs();

// Returns a flatbuffer model with `simple_stateful_op`.
const Model* GetSimpleStatefulModel();

// Returns a flatbuffer model with "if" and two subgraphs.
const Model* GetSimpleModelWithSubgraphsAndIf();

// Returns a flatbuffer model with "while" and three subgraphs.
const Model* GetSimpleModelWithSubgraphsAndWhile();

// Returns a flatbuffer model with "if" and two subgraphs, where input tensor 1
// of the "if" subgraph overlaps with input tensor 2 of subgraph 1.
const Model* GetModelWithIfAndSubgraphInputTensorOverlap();

// Returns a flatbuffer model with null subgraph/operator inputs and outputs.
const Model* GetSimpleModelWithNullInputsAndOutputs();

// Builds a one-dimensional flatbuffer tensor of the given size.
const Tensor* Create1dFlatbufferTensor(int size, bool is_variable = false);

// Builds a one-dimensional flatbuffer tensor of the given size with
// quantization metadata.
const Tensor* CreateQuantizedFlatbufferTensor(int size);

// Creates a one-dimensional tensor with no quantization metadata.
const Tensor* CreateMissingQuantizationFlatbufferTensor(int size);

// Creates a vector of flatbuffer buffers.
const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
CreateFlatbufferBuffers();

// Performs a simple string comparison without requiring the standard C
// library.
int TestStrcmp(const char* a, const char* b);

// Wrapper to forward kernel errors to the interpreter's error reporter.
void ReportOpError(struct TfLiteContext* context, const char* format, ...);

void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                     TfLiteContext* context);

// Create a TfLiteIntArray from an array of ints. The first element in the
// supplied array must be the size of the array expressed as an int.
TfLiteIntArray* IntArrayFromInts(int* int_array);

// Create a TfLiteFloatArray from an array of floats. The first element in the
// supplied array must be the size of the array expressed as a float.
TfLiteFloatArray* FloatArrayFromFloats(const float* floats);
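//
// Example (illustrative): the leading element carries the length of the
// payload that follows it.
//   int dims_data[] = {2, 1, 3};  // A 2-element array holding {1, 3}.
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
//   float scales_data[] = {1, 0.5f};  // A 1-element array holding {0.5f}.
//   TfLiteFloatArray* scales = FloatArrayFromFloats(scales_data);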

template <typename T>
TfLiteTensor CreateTensor(const T* data, TfLiteIntArray* dims,
                          const bool is_variable = false) {
  TfLiteTensor result;
  result.dims = dims;
  result.params = {};
  result.quantization = {kTfLiteNoQuantization, nullptr};
  result.is_variable = is_variable;
  result.allocation_type = kTfLiteMemNone;
  result.type = typeToTfLiteType<T>();
  // Const cast is used to allow passing in const and non-const arrays within a
  // single CreateTensor method. A const array should be used for immutable
  // input tensors and a non-const array should be used for mutable and output
  // tensors.
  result.data.data = const_cast<T*>(data);
  result.bytes = ElementCount(*dims) * sizeof(T);
  return result;
}
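//
// Example (illustrative): wrapping a constant float array in a TfLiteTensor.
//   int dims_data[] = {2, 2, 3};
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
//   const float data[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
//   TfLiteTensor tensor = CreateTensor(data, dims);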

template <typename T>
TfLiteTensor CreateQuantizedTensor(const T* data, TfLiteIntArray* dims,
                                   const float scale, const int zero_point = 0,
                                   const bool is_variable = false) {
  TfLiteTensor result = CreateTensor(data, dims, is_variable);
  result.params = {scale, zero_point};
  result.quantization = {kTfLiteAffineQuantization, nullptr};
  return result;
}

template <typename T>
TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized,
                                   TfLiteIntArray* dims, float scale,
                                   int zero_point, bool is_variable = false) {
  int input_size = ElementCount(*dims);
  tflite::Quantize(input, quantized, input_size, scale, zero_point);
  return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable);
}
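//
// Example (illustrative): quantize four float values to int8 with scale 0.5
// and zero point 0, and wrap them in a TfLiteTensor.
//   int dims_data[] = {2, 1, 4};
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
//   const float input[] = {-1.0f, 0.0f, 0.5f, 1.0f};
//   int8_t quantized[4];
//   TfLiteTensor tensor = CreateQuantizedTensor(
//       input, quantized, dims, /*scale=*/0.5f, /*zero_point=*/0);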

TfLiteTensor CreateQuantizedBiasTensor(const float* data, int16_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale,
                                       bool is_variable = false);

TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale,
                                       bool is_variable = false);

TfLiteTensor CreateQuantizedBiasTensor(const float* data,
                                       std::int64_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale,
                                       bool is_variable = false);

// Quantizes int32_t bias tensor with per-channel weights determined by input
// scale multiplied by weight scale for each channel.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, int32_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    bool is_variable = false);

// Quantizes int64_t bias tensor with per-channel weights determined by input
// scale multiplied by weight scale for each channel.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, std::int64_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    bool is_variable = false);

TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
    const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
    int* zero_points, TfLiteAffineQuantization* affine_quant,
    int quantized_dimension, bool is_variable = false);

// Returns the number of tensors in the default subgraph for a tflite::Model.
size_t GetModelTensorCount(const Model* model);

// Derives the quantization scaling factor from a min and max range.
template <typename T>
inline float ScaleFromMinMax(const float min, const float max) {
  return (max - min) /
         static_cast<float>((std::numeric_limits<T>::max() * 1.0) -
                            std::numeric_limits<T>::min());
}

// Derives the quantization zero point from a min and max range.
template <typename T>
inline int ZeroPointFromMinMax(const float min, const float max) {
  return static_cast<int>(std::numeric_limits<T>::min()) +
         static_cast<int>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
}
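//
// Example (illustrative): for int8 over the range [-1.0f, 1.0f],
// ScaleFromMinMax<int8_t>(-1.0f, 1.0f) returns 2.0f / 255.0f (~0.00784f) and
// ZeroPointFromMinMax<int8_t>(-1.0f, 1.0f) returns -128 + 128 = 0.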

}  // namespace testing
}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_