// micro_allocation_info.h
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATION_INFO_H_
#define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATION_INFO_H_

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/micro/compatibility.h"
#include "tensorflow/lite/micro/flatbuffer_utils.h"
#include "tensorflow/lite/micro/micro_allocator.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
  22. // Used to hold information used during allocation calculations.
  23. struct AllocationInfo {
  24. size_t bytes;
  25. void** output_ptr;
  26. int first_created;
  27. int last_used;
  28. int32_t offline_offset;
  29. bool needs_allocating;
  30. };
  31. // Used to hold the allocation info list and related metadata for the entire
  32. // graph (including subgraphs). Since all subgraphs are planned together, the
  33. // allocation info list contains allocations for all subgraphs. Track the offset
  34. // into this list for each subgraph then reserve space to track all allocations.
  35. //
  36. // The AllocationInfo list is a contiguous list of allocations across all
  37. // subgraphs and scratch buffers. Each element here is marked as
  38. // s<subgraph index>t<tensor index>. The following is a possible
  39. // AllocationInfo list:
  40. // [s0t0, s0t1, s1t0, s2t1, s1t2, s3t0, s3t1, scratch0, scratch1, scratch2]
  41. //
  42. // For this example, the subgraph offsets would be [0, 2, 5] and the scratch
  43. // offset would be 7.
  44. struct GraphAllocationInfo {
  45. AllocationInfo* allocation_info;
  46. size_t allocation_info_count;
  47. size_t* subgraph_offsets;
  48. size_t scratch_offset;
  49. size_t tensor_count;
  50. size_t scratch_buffer_count;
  51. };
  52. // A helper class to construct AllocationInfo array. This array contains the
  53. // lifetime of tensors / scratch_buffer and will be used to calculate the memory
  54. // plan. Methods need to be called in order from `Create`, Init`, `Add*`, to
  55. // `Finish`.
  56. class AllocationInfoBuilder {
  57. public:
  58. AllocationInfoBuilder(const Model* model,
  59. INonPersistentBufferAllocator* non_persistent_allocator,
  60. ErrorReporter* reporter)
  61. : model_(model),
  62. non_persistent_allocator_(non_persistent_allocator)
  63. #if !defined(TF_LITE_STRIP_ERROR_STRINGS)
  64. ,
  65. reporter_(reporter)
  66. #endif
  67. {
  68. }
  69. // Check if model contains offline planned buffer offsets.
  70. // - If there's no metadata available, offline_planner_offsets is not set
  71. // - If there's metadata available, offline_planner_offsets will point to the
  72. // first offset in the metadata buffer list.
  73. TfLiteStatus GetOfflinePlannedOffsets(
  74. const int32_t** offline_planner_offsets);
  75. // Allocate memory for the allocation info array as well as offsets into that
  76. // array for each subgraph.
  77. TfLiteStatus CreateAllocationInfo(int scratch_buffer_request_count);
  78. // Release memory used for the allocation info array.
  79. TfLiteStatus FreeAllocationInfo();
  80. // Initialize AllocationInfo for all tensors and scratch buffers in the graph.
  81. TfLiteStatus InitializeAllocationInfo(const int32_t* offline_offsets,
  82. SubgraphAllocations* allocations);
  83. // Mark the scope of each tensor and scratch buffer across the graph. Enter
  84. // all possible subgraphs invoked by each control flow operator. This method
  85. // marks the maximum lifetime of each buffer so that tensors are correctly
  86. // planned for all valid invocation flows.
  87. TfLiteStatus MarkAllocationLifetimes(
  88. int subgraph_idx, internal::ScratchBufferRequest* scratch_buffer_request,
  89. ScratchBufferHandle* scratch_buffer_handles,
  90. SubgraphAllocations* allocations);
  91. // Identify control flow operators and recursively mark all subgraphs which
  92. // that operator can invoke. The lifetime of all tensors within a subgraph
  93. // can only be extended. The order of subgraph invocation does not matter
  94. // since subgraphs within the same control flow operator are executed
  95. // within their own allocation scope (planned buffers in a subgraph cannot
  96. // persist beyond the end of that subgraph's invocation).
  97. TfLiteStatus MarkSubgraphLifetimesIfNecessary(
  98. const Operator* op,
  99. internal::ScratchBufferRequest* scratch_buffer_requests,
  100. ScratchBufferHandle* scratch_buffer_handles,
  101. SubgraphAllocations* allocations);
  102. // Returns the number of allocations.
  103. int AllocationCount() const { return info_.allocation_info_count; }
  104. // Returns a pointer to the built AllocationInfo array.
  105. AllocationInfo* Finish() const { return info_.allocation_info; }
  106. private:
  107. // Mark the given Allocation info as first created at the specified allocation
  108. // scope count. Only the first creation must be recorded since the allocation
  109. // scope count monotonically increases throughout the lifetime marking
  110. // process.
  111. void UpdateFirstCreated(AllocationInfo* current, int allocation_scope_count);
  112. // Mark the given AllocationInfo as last used at the specified allocation
  113. // scope
  114. // count. Update the last used marker every time, since the allocation scope
  115. // count monotonically increases through the lifetime marking process.
  116. void UpdateLastUsed(AllocationInfo* current, int allocation_scope_count);
  117. // Validate if a subgraph satisfies assumptions.
  118. TfLiteStatus ValidateSubgraph(const SubGraph* subgraph,
  119. TfLiteEvalTensor* eval_tensors);
  120. const tflite::Model* model_ = nullptr;
  121. INonPersistentBufferAllocator* non_persistent_allocator_ = nullptr;
  122. #if !defined(TF_LITE_STRIP_ERROR_STRINGS)
  123. ErrorReporter* reporter_ = nullptr;
  124. #endif
  125. GraphAllocationInfo info_;
  126. int allocation_scope_count_ = 0;
  127. };
}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_MICRO_ALLOCATION_INFO_H_