micro_allocator.h
  1. /* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
  2. Licensed under the Apache License, Version 2.0 (the "License");
  3. you may not use this file except in compliance with the License.
  4. You may obtain a copy of the License at
  5. http://www.apache.org/licenses/LICENSE-2.0
  6. Unless required by applicable law or agreed to in writing, software
  7. distributed under the License is distributed on an "AS IS" BASIS,
  8. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. See the License for the specific language governing permissions and
  10. limitations under the License.
  11. ==============================================================================*/
  12. #ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_
  13. #define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_
  14. #include <cstddef>
  15. #include <cstdint>
  16. #include "tensorflow/lite/c/common.h"
  17. #include "tensorflow/lite/core/api/error_reporter.h"
  18. #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
  19. #include "tensorflow/lite/micro/compatibility.h"
  20. #include "tensorflow/lite/micro/flatbuffer_utils.h"
  21. #include "tensorflow/lite/micro/simple_memory_allocator.h"
  22. #include "tensorflow/lite/schema/schema_generated.h"
  23. namespace tflite {
  24. namespace internal {
// Sets up all of the data structure members for a TfLiteTensor based on the
// contents of a serialized tensor in the flatbuffer.
// TODO(b/162311891): Drop this method when the interpreter has an API for
// returning buffers on TfLiteEvalTensor.
//
// `allocator` supplies the arena memory needed while populating `result`.
// NOTE(review): `allocate_temp` appears to select whether auxiliary data is
// placed in the temp section (reclaimed by ResetTempAllocations()) rather
// than the persistent tail — confirm against the implementation.
// `buffers` is the flatbuffer buffer table used to resolve the tensor's data.
// Returns kTfLiteOk on success; errors are reported via `error_reporter`.
TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
    SimpleMemoryAllocator* allocator, bool allocate_temp,
    const tflite::Tensor& flatbuffer_tensor,
    const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
    ErrorReporter* error_reporter, TfLiteTensor* result);
// Holds placeholder information for a scratch buffer request from a kernel.
// This struct is only used during the model prepare stage. Each request from a
// kernel is stored in the head section. During the prepare stage, the head
// section will at least hold kMaxScratchBuffersPerOp number of requests plus
// any requests from previous kernel requests.
//
// When the memory plan is finalized, these structs are no longer used in favor
// of a sequential array of ScratchBufferHandle allocations in the tail
// section. These allocations are indexed by the request API defined in the
// TfLiteContext struct.
typedef struct {
  // Number of bytes required by the buffer. The actual allocated size might be
  // greater than `bytes` due to buffer alignment.
  size_t bytes;
  // Node that the buffer is allocated for. This provides useful information to
  // determine the lifetime of the buffer. In AllocationInfo, this buffer will
  // have `before` = node_idx and `after` = node_idx.
  int node_idx;
} ScratchBufferRequest;
  53. } // namespace internal
// Pairs a node in the inference graph with the registration that provides the
// op implementation used to execute it.
typedef struct {
  // Runtime state for one operator invocation (inputs, outputs, user data).
  TfLiteNode node;
  // Non-owning pointer to the op's registration (init/prepare/invoke hooks).
  const TfLiteRegistration* registration;
} NodeAndRegistration;
// Holds a pointer to a buffer for a scratch buffer requested by a kernel during
// the model prepare stage. This struct is allocated in-place and allows for
// quick pointer-indexed lookup for speed during model inference.
typedef struct {
  // Pointer to location of the scratch buffer:
  uint8_t* data;
} ScratchBufferHandle;
// Stores all per-subgraph allocations. This includes the node and registration
// array, tensor list and scratch buffer handles for each subgraph.
typedef struct {
  // Array of node/registration pairs, one per operator in the subgraph.
  NodeAndRegistration* node_and_registrations;
  // Array of eval tensors; the source of truth for tensor data buffers.
  TfLiteEvalTensor* tensors;
} SubgraphAllocations;
// Allocator responsible for allocating memory for all intermediate tensors
// necessary to invoke a model.
//
// The lifetime of the model, tensor arena and error reporter must be at
// least as long as that of the allocator object, since the allocator needs
// them to be accessible during its entire lifetime.
//
// The MicroAllocator simply plans out additional allocations that are required
// to stand up a model for inference in TF Micro. This class currently relies
// on an additional allocator - SimpleMemoryAllocator - for all allocations
// from an arena. These allocations are divided into head (non-persistent) and
// tail (persistent) regions:
//
// Memory layout to help understand how it works.
// This information could change in the future version.
// ************** .memory_allocator->GetBuffer()
// Tensors/Scratch buffers (head)
// ************** .head_watermark
// unused memory
// ************** .memory_allocator->GetBuffer() + ->GetMaxBufferSize()
//                                              - ->GetDataSize()
// persistent area (tail)
// ************** .memory_allocator->GetBuffer() + ->GetMaxBufferSize()
class MicroAllocator {
 public:
  // Creates a MicroAllocator instance from a given tensor arena. This arena
  // will be managed by the created instance.
  // Note: Please use __declspec(align(16)) to make sure tensor_arena is 16
  // bytes aligned, otherwise some head room will be wasted.
  // TODO(b/157615197): Cleanup constructor + factory usage.
  static MicroAllocator* Create(uint8_t* tensor_arena, size_t arena_size,
                                ErrorReporter* error_reporter);

  // Creates a MicroAllocator instance using the provided SimpleMemoryAllocator
  // instance. This allocator instance will use the SimpleMemoryAllocator
  // instance to manage allocations internally.
  static MicroAllocator* Create(SimpleMemoryAllocator* memory_allocator,
                                ErrorReporter* error_reporter);

  // Allocates internal resources required for model inference for each
  // subgraph from the arena.
  //
  // This method will run through the flatbuffer data supplied in the model to
  // properly allocate tensor, node, and op registration data. This method is
  // expected to be followed with a call to FinishModelAllocation(). Returns a
  // pointer to an array of SubgraphAllocations (also stored in the tail of the
  // arena) where each index corresponds to a different subgraph in the model.
  // Return value is nullptr if the allocations failed.
  SubgraphAllocations* StartModelAllocation(const Model* model);

  // Finish allocating internal resources required for model inference.
  //
  // - Plan the memory for activation tensors and scratch buffers.
  // - Update eval tensors for each subgraph based on planned offsets.
  // - Allocate scratch buffer handles array and update based on planned
  //   offsets.
  //
  // This method should be called after assigning model resources
  // in StartModelAllocation(). The subgraph_allocations pointer should be the
  // value passed into this class during StartModelAllocation(). Scratch buffer
  // handles are stored in the out-param `scratch_buffer_handles` array which
  // is allocated in this method. This value will be used in `GetScratchBuffer`
  // call to retrieve scratch buffers.
  TfLiteStatus FinishModelAllocation(
      const Model* model, SubgraphAllocations* subgraph_allocations,
      ScratchBufferHandle** scratch_buffer_handles);

  // Allocates a TfLiteTensor struct and populates the returned value with
  // properties from the model flatbuffer. This struct is allocated from
  // persistent arena memory and is only guaranteed for the lifetime of the
  // application. The eval_tensors pointer should be the value passed into this
  // class during StartModelAllocation() and contains the source-of-truth for
  // buffers.
  virtual TfLiteTensor* AllocatePersistentTfLiteTensor(
      const Model* model, const SubgraphAllocations* subgraph_allocations,
      int tensor_index, int subgraph_index);

  // Allocates a TfLiteTensor struct and populates the returned value with
  // properties from the model flatbuffer. This struct is allocated from
  // temporary arena memory and is only guaranteed until a call is made to
  // ResetTempAllocations(). subgraph_allocations contains the array of
  // TfLiteEvalTensors. If the newly allocated temp at the specified subgraph
  // and tensor index is already present in the TfLiteEvalTensor array, its
  // data buffer will be re-used.
  virtual TfLiteTensor* AllocateTempTfLiteTensor(
      const Model* model, const SubgraphAllocations* subgraph_allocations,
      int tensor_index, int subgraph_index);

  // Resets all temporary allocations. This method should be called after a
  // chain of temp allocations (e.g. chain of TfLiteTensor objects via
  // AllocateTempTfLiteTensor()).
  virtual void ResetTempAllocations();

  // Allocates persistent buffer which has the same life time as the allocator.
  // The memory is immediately available and is allocated from the tail of the
  // arena.
  virtual void* AllocatePersistentBuffer(size_t bytes);

  // Registers a scratch buffer of size `bytes` for the Node in
  // `subgraph_idx`. This method only requests a buffer with a given size to be
  // used after a model has finished allocation via FinishModelAllocation().
  // All requested buffers will be accessible by the out-param in that method.
  // The assigned index is returned through `buffer_idx`.
  TfLiteStatus RequestScratchBufferInArena(size_t bytes, int subgraph_idx,
                                           int* buffer_idx);

  // Finish allocating a specific NodeAndRegistration prepare block (kernel
  // entry for a model) with a given node ID. This call ensures that any
  // scratch buffer requests and temporary allocations are handled and ready
  // for the next node prepare block.
  TfLiteStatus FinishPrepareNodeAllocations(int node_id);

  // Returns the arena usage in bytes, only available after
  // `FinishModelAllocation`. Otherwise, it will return 0.
  size_t used_bytes() const;

  // Converts a flatbuffer int32_t array to a TfLiteIntArray, accounting for
  // endianness.
  TfLiteStatus FlatBufferVectorToTfLiteTypeArray(
      const flatbuffers::Vector<int32_t>* flatbuffer_array,
      TfLiteIntArray** result);

  // Returns the allocator used for persistent builtin (op parameter) data.
  BuiltinDataAllocator* GetBuiltinDataAllocator();

 protected:
  MicroAllocator(SimpleMemoryAllocator* memory_allocator,
                 ErrorReporter* error_reporter);
  virtual ~MicroAllocator();

  // Allocates an array in the arena to hold pointers to the node and
  // registration pointers required to represent the inference graph of the
  // model.
  virtual TfLiteStatus AllocateNodeAndRegistrations(
      const Model* model, SubgraphAllocations* subgraph_allocations);

  // Allocates the list of persistent TfLiteEvalTensors that are used for the
  // "eval" phase of model inference. These structs will be the source of truth
  // for all tensor buffers.
  virtual TfLiteStatus AllocateTfLiteEvalTensors(
      const Model* model, SubgraphAllocations* subgraph_allocations);

  // Allocates persistent tensor buffers for variable tensors in the subgraph.
  virtual TfLiteStatus AllocateVariables(const SubGraph* subgraph,
                                         TfLiteEvalTensor* eval_tensors);

  // Allocate and return a persistent TfLiteTensor.
  // TODO(b/162311891): Drop this method when the interpreter has an API for
  // accessing TfLiteEvalTensor structs.
  virtual TfLiteTensor* AllocatePersistentTfLiteTensorInternal();

  // Populates a TfLiteTensor struct with data from the model flatbuffer. Any
  // quantization data is allocated from either the tail (persistent) or temp
  // sections of the arena based on the allocation flag.
  virtual TfLiteStatus PopulateTfLiteTensorFromFlatbuffer(const Model* model,
                                                          TfLiteTensor* tensor,
                                                          int tensor_index,
                                                          int subgraph_idx,
                                                          bool allocate_temp);

  // Accessor for subclasses; reports allocation failures.
  ErrorReporter* error_reporter() const;

 private:
  // Commits a memory plan for all non-persistent buffer allocations in the
  // 'head' section of the memory arena. The eval_tensors pointer is the list
  // of pre-allocated TfLiteEvalTensor structs that will point to the buffers
  // that will be allocated into the head section in this function call. The
  // scratch_buffer_handles pointer is the array of pre-allocated
  // ScratchBufferHandle structs that will point to allocated buffers also in
  // the head section.
  virtual TfLiteStatus CommitStaticMemoryPlan(
      const Model* model, TfLiteEvalTensor* eval_tensors,
      ScratchBufferHandle* scratch_buffer_handles, int subgraph_idx);

  // Allocates an array of ScratchBufferHandle structs in the tail section for
  // a given number of handles.
  virtual TfLiteStatus AllocateScratchBufferHandles(
      ScratchBufferHandle** scratch_buffer_handles, size_t handle_count);

  // Clears all internal scratch buffer request counts and resets the head to
  // prepare for kernels to request scratch buffer data when a model is
  // preparing.
  TfLiteStatus InitScratchBufferData();

  // Returns the pointer for the array of ScratchBufferRequest allocations in
  // the head section.
  internal::ScratchBufferRequest* GetScratchBufferRequests();

  // A simple memory allocator that always allocates from the arena tail or
  // head.
  SimpleMemoryAllocator* memory_allocator_;

  // Allocator used to allocate persistent builtin data.
  BuiltinDataAllocator* builtin_data_allocator_;

  ErrorReporter* error_reporter_;

  // True between StartModelAllocation() and FinishModelAllocation(); guards
  // against mismatched allocation-lifecycle calls.
  bool model_is_allocating_;

  // Holds the number of ScratchBufferRequest instances stored in the head
  // section when a model is allocating.
  size_t scratch_buffer_request_count_ = 0;

  // Holds the byte length of the memory plan with the largest head usage. Used
  // to ensure that multi-tenant allocations can share the head for buffers.
  size_t max_head_buffer_usage_ = 0;

  TF_LITE_REMOVE_VIRTUAL_DELETE
};
  246. } // namespace tflite
  247. #endif // TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_