micro_allocator.h
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
  12. #ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_
  13. #define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_
#include <cstddef>
#include <cstdint>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/micro/simple_memory_allocator.h"
#include "tensorflow/lite/schema/schema_generated.h"
  19. namespace tflite {
  20. // Namespace used for unittests.
  21. namespace internal {
// Sets up all of the data structure members for a runtime tensor
// based on the contents of a serialized tensor.
//
// `flatbuffer_tensor` is the serialized description; `buffers` is the model's
// flatbuffer buffer table that the tensor's buffer index refers into. On
// success, `result` is fully initialized; failures are reported through
// `error_reporter` and signalled via the returned TfLiteStatus.
// NOTE(review): presumably `allocator` backs any allocations made while
// populating `result` — confirm against the implementation in the .cc file.
TfLiteStatus InitializeRuntimeTensor(
    SimpleMemoryAllocator* allocator, const tflite::Tensor& flatbuffer_tensor,
    const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
    ErrorReporter* error_reporter, TfLiteTensor* result);
  28. // A handle tracking scratch buffer allocation. This handle is created by
  29. // `RequestScratchBufferInArena`. `data` field is populated in
  30. // `FinishTensorAllocation` after static memory planning.
  31. // TODO(b/150257460) As a future optimization, this struct could be replaced by
  32. // a union, since once `data` is populated, `bytes` and `node_idx` is not
  33. // needed.
  34. typedef struct {
  35. // Pointer to the scratch buffer.
  36. uint8_t* data;
  37. // Number of bytes required by the buffer. The actual allocated size might be
  38. // greater than `bytes` due to buffer alignment.
  39. size_t bytes;
  40. // Node where the buffer is allocated for. This provides useful information to
  41. // determine the lifetime of the buffer. In AllocationInfo, this buffer will
  42. // have `before` = node_idx and `after` = node_idx.
  43. int node_idx;
  44. } ScratchBufferHandle;
  45. } // namespace internal
  46. typedef struct {
  47. TfLiteNode node;
  48. const TfLiteRegistration* registration;
  49. } NodeAndRegistration;
// Allocator responsible for allocating memory for all intermediate tensors
// necessary to invoke a model.
//
// Memory layout to help understand how it works
// This information could change in the future version.
// ************** .memory_allocator->GetBuffer()
// Tensors/Scratch buffers (head)
// ************** .head_watermark
// unused memory
// ************** .memory_allocator->GetBuffer() + ->GetMaxBufferSize()
//                                              - ->GetDataSize()
// persistent area (tail)
// ************** .memory_allocator->GetBuffer() + ->GetMaxBufferSize()
class MicroAllocator {
 public:
  // The lifetime of the model, tensor allocator and error reporter must be at
  // least as long as that of the allocator object, since the allocator needs
  // them to be accessible during its entire lifetime.
  // Note: Please use __declspec(align(16)) to make sure tensor_arena is 16
  // bytes aligned, otherwise some head room will be wasted.
  MicroAllocator(TfLiteContext* context, const Model* model,
                 uint8_t* tensor_arena, size_t arena_size,
                 ErrorReporter* error_reporter);

  // Runs through the model and allocates all necessary input, output and
  // intermediate tensors.
  // WARNING: doing any allocation after calling this method has the risk of
  // corrupting tensor data so this method should be the last non-const method
  // called in this class.
  TfLiteStatus FinishTensorAllocation();

  // Returns the arena usage in bytes, only available after
  // `FinishTensorAllocation`. Otherwise, it will return 0.
  size_t used_bytes() const {
    // While the allocator is still active, planning has not finished and the
    // usage figure would be meaningless — report 0 instead.
    if (active_) {
      return 0;
    }
    return memory_allocator_->GetUsedBytes();
  }

  // Run through the model to allocate nodes and registrations. We need to keep
  // them for the entire life time of the model to allow persistent tensors.
  // This method needs to be called before FinishTensorAllocation method.
  TfLiteStatus AllocateNodeAndRegistrations(
      const OpResolver& op_resolver,
      NodeAndRegistration** node_and_registrations);

  // Allocates persistent buffer which has the same life time as the allocator.
  // The memory is immediately available and is allocated from the tail of the
  // arena.
  TfLiteStatus AllocatePersistentBuffer(size_t bytes, void** ptr);

  // Register a scratch buffer of size `bytes` for Node with `node_id`.
  // This method only allocates a BufferHandle holding information for memory
  // planning. The buffer ptr is ready after `FinishTensorAllocation` and can
  // be retrieved by `GetScratchBuffer` method using the returned buffer_idx.
  // Note that there should be no tail allocation between two consecutive
  // `RequestScratchBufferInArena` calls.
  TfLiteStatus RequestScratchBufferInArena(int node_id, size_t bytes,
                                           int* buffer_idx);

  // Returns the pointer to the planned scratch buffer.
  void* GetScratchBuffer(int buffer_idx) const;

 private:
  // One-time setup run by the constructor.
  // NOTE(review): presumably caches subgraph_/operators_/tensors_ from the
  // model — confirm against the .cc implementation.
  TfLiteStatus Init();

  const Model* model_;
  // A simple memory allocator that always allocate from the arena tail.
  SimpleMemoryAllocator* memory_allocator_;
  ErrorReporter* error_reporter_;
  TfLiteContext* context_;
  // Indicating if the allocator is ready for allocation.
  bool active_ = false;

  // Scratch buffer handles, stored in reverse order for efficiency.
  // i.e. scratch_buffer_handles_[0] is the handle for the last buffer,
  // corresponding to the last RequestScratchBufferInArena call.
  internal::ScratchBufferHandle* scratch_buffer_handles_ = nullptr;
  // How many scratch buffers have been allocated.
  size_t scratch_buffer_count_ = 0;

  // Cached views into the model's flatbuffer data.
  const SubGraph* subgraph_;
  const flatbuffers::Vector<flatbuffers::Offset<Operator>>* operators_;
  const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors_;
};
  125. } // namespace tflite
  126. #endif // TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_