micro_interpreter.h

/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_
#define TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/micro/micro_allocator.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/type_to_tflitetype.h"
namespace tflite {

namespace internal {

// A helper class to encapsulate the implementation of APIs in Context.
// context->impl_ points to an instance of this class.
// Check tensorflow/lite/c/common.h for detailed descriptions.
class ContextHelper {
 public:
  explicit ContextHelper(ErrorReporter* error_reporter,
                         MicroAllocator* allocator)
      : allocator_(allocator), error_reporter_(error_reporter) {}

  static TfLiteStatus AllocatePersistentBuffer(TfLiteContext* ctx, size_t bytes,
                                               void** ptr);
  static TfLiteStatus RequestScratchBufferInArena(TfLiteContext* ctx,
                                                  size_t bytes,
                                                  int* buffer_idx);
  static void* GetScratchBuffer(TfLiteContext* ctx, int buffer_idx);
  static void ReportOpError(struct TfLiteContext* context, const char* format,
                            ...);

  void SetNodeIndex(int idx) { current_node_idx_ = idx; }

 private:
  MicroAllocator* allocator_;
  ErrorReporter* error_reporter_;
  int current_node_idx_ = -1;
};
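
// A minimal sketch of how an op kernel might reach these helpers through the
// TfLiteContext function pointers that MicroInterpreter installs. The OpData
// struct and the scratch-buffer size below are illustrative placeholders, not
// part of this API:
//
//   struct OpData {
//     int scratch_index;
//   };
//
//   void* Init(TfLiteContext* context, const char* buffer, size_t length) {
//     void* data = nullptr;
//     context->AllocatePersistentBuffer(context, sizeof(OpData), &data);
//     return data;
//   }
//
//   TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
//     OpData* op_data = static_cast<OpData*>(node->user_data);
//     return context->RequestScratchBufferInArena(context, /*bytes=*/1024,
//                                                 &op_data->scratch_index);
//   }
//
//   TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
//     OpData* op_data = static_cast<OpData*>(node->user_data);
//     void* scratch = context->GetScratchBuffer(context, op_data->scratch_index);
//     // ... use `scratch` as temporary working memory ...
//     return kTfLiteOk;
//   }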

}  // namespace internal

class MicroInterpreter {
 public:
  // The lifetime of the model, op resolver, tensor arena, and error reporter
  // must be at least as long as that of the interpreter object, since the
  // interpreter may need to access them at any time. This means that you
  // should usually create them with the same scope as each other, for example
  // having them all allocated on the stack as local variables through a
  // top-level function.
  // The interpreter doesn't do any deallocation of any of the pointed-to
  // objects; ownership remains with the caller.
  MicroInterpreter(const Model* model, const OpResolver& op_resolver,
                   uint8_t* tensor_arena, size_t tensor_arena_size,
                   ErrorReporter* error_reporter);

  ~MicroInterpreter();

  // Runs through the model and allocates all necessary input, output and
  // intermediate tensors.
  TfLiteStatus AllocateTensors();

  // In order to support partial graph runs for strided models, this can return
  // values other than kTfLiteOk and kTfLiteError.
  // TODO(b/149795762): Add this to the TfLiteStatus enum.
  TfLiteStatus Invoke();
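
  // A minimal usage sketch (not part of this header). `g_model_data`, the
  // arena size, and the resolver type are illustrative placeholders; the
  // resolver header and namespace may differ between versions:
  //
  //   const tflite::Model* model = tflite::GetModel(g_model_data);
  //   tflite::MicroErrorReporter error_reporter;
  //   tflite::ops::micro::AllOpsResolver resolver;
  //   constexpr size_t kArenaSize = 10 * 1024;
  //   alignas(16) static uint8_t tensor_arena[kArenaSize];
  //
  //   tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
  //                                        kArenaSize, &error_reporter);
  //   if (interpreter.AllocateTensors() != kTfLiteOk) { /* handle error */ }
  //   // ... fill interpreter.input(0)->data ...
  //   if (interpreter.Invoke() != kTfLiteOk) { /* handle error */ }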

  size_t tensors_size() const { return context_.tensors_size; }

  TfLiteTensor* tensor(size_t tensor_index);

  template <class T>
  T* typed_tensor(int tensor_index) {
    if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) {
      if (tensor_ptr->type == typeToTfLiteType<T>()) {
        return GetTensorData<T>(tensor_ptr);
      }
    }
    return nullptr;
  }

  TfLiteTensor* input(size_t index);
  size_t inputs_size() const { return subgraph_->inputs()->Length(); }
  const flatbuffers::Vector<int32_t>& inputs() const {
    return *subgraph_->inputs();
  }
  TfLiteTensor* input_tensor(size_t index) { return input(index); }

  template <class T>
  T* typed_input_tensor(int tensor_index) {
    if (TfLiteTensor* tensor_ptr = input_tensor(tensor_index)) {
      if (tensor_ptr->type == typeToTfLiteType<T>()) {
        return GetTensorData<T>(tensor_ptr);
      }
    }
    return nullptr;
  }

  TfLiteTensor* output(size_t index);
  size_t outputs_size() const { return subgraph_->outputs()->Length(); }
  const flatbuffers::Vector<int32_t>& outputs() const {
    return *subgraph_->outputs();
  }
  TfLiteTensor* output_tensor(size_t index) { return output(index); }

  template <class T>
  T* typed_output_tensor(int tensor_index) {
    if (TfLiteTensor* tensor_ptr = output_tensor(tensor_index)) {
      if (tensor_ptr->type == typeToTfLiteType<T>()) {
        return GetTensorData<T>(tensor_ptr);
      }
    }
    return nullptr;
  }
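
  // A sketch of typed access for a model with one float input and one float
  // output; the tensor types and indices are assumptions about the model, not
  // guarantees made by this API, and a nullptr return signals a type mismatch:
  //
  //   float* in = interpreter.typed_input_tensor<float>(0);
  //   if (in != nullptr) {
  //     in[0] = 1.0f;
  //   }
  //   interpreter.Invoke();
  //   float* out = interpreter.typed_output_tensor<float>(0);
  //   if (out != nullptr) {
  //     // ... read results from out[0] ...
  //   }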

  // Reset all variable tensors to the default value.
  TfLiteStatus ResetVariableTensors();

  TfLiteStatus initialization_status() const { return initialization_status_; }

  size_t operators_size() const { return operators_->size(); }

  // For debugging only.
  const NodeAndRegistration node_and_registration(int node_index) const {
    return node_and_registrations_[node_index];
  }

  // For debugging only.
  // Returns the actual used arena in bytes. This method gives the optimal
  // arena size. It's only available after `AllocateTensors` has been called.
  // Note that normally `tensor_arena` requires 16-byte alignment to fully
  // utilize the space. If that's not the case, the optimal arena size would be
  // arena_used_bytes() + 16.
  size_t arena_used_bytes() const { return allocator_.used_bytes(); }
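
  // A sketch of using this to right-size the arena during development; the
  // deliberately oversized kArenaSize is an illustrative placeholder:
  //
  //   constexpr size_t kArenaSize = 64 * 1024;  // generous first guess
  //   alignas(16) static uint8_t tensor_arena[kArenaSize];
  //   tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
  //                                        kArenaSize, &error_reporter);
  //   interpreter.AllocateTensors();
  //   size_t needed = interpreter.arena_used_bytes();
  //   // Shrink kArenaSize toward `needed` (plus 16 bytes if the buffer is
  //   // not 16-byte aligned) in a follow-up build.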

 private:
  void CorrectTensorEndianness(TfLiteTensor* tensorCorr);

  template <class T>
  void CorrectTensorDataEndianness(T* data, int32_t size);

  NodeAndRegistration* node_and_registrations_ = nullptr;

  const Model* model_;
  const OpResolver& op_resolver_;
  ErrorReporter* error_reporter_;
  TfLiteContext context_ = {};
  MicroAllocator allocator_;
  bool tensors_allocated_;

  TfLiteStatus initialization_status_;

  const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors_;
  const flatbuffers::Vector<flatbuffers::Offset<Operator>>* operators_;

  const SubGraph* subgraph_;
  internal::ContextHelper context_helper_;
};

}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_