kernel_util.h

/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_
#define TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_

#include <cstdint>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace micro {

// Returns a mutable tensor for a given input index. is_variable must be
// checked during prepare when the full TfLiteTensor is available.
inline TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context,
                                             const TfLiteNode* node,
                                             int index) {
  TFLITE_DCHECK(context != nullptr);
  TFLITE_DCHECK(node != nullptr);
  return context->GetEvalTensor(context, node->inputs->data[index]);
}

// Returns the TfLiteEvalTensor struct for a given input index in a node.
inline const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context,
                                            const TfLiteNode* node,
                                            int index) {
  return GetMutableEvalInput(context, node, index);
}

// Returns the TfLiteEvalTensor struct for a given output index in a node.
inline TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context,
                                       const TfLiteNode* node, int index) {
  TFLITE_DCHECK(context != nullptr);
  TFLITE_DCHECK(node != nullptr);
  return context->GetEvalTensor(context, node->outputs->data[index]);
}
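
// Example usage (an illustrative sketch, not part of this header): fetching
// the input and output eval tensors inside a kernel's Eval function. The
// index 0 is hypothetical and depends on the op's tensor layout.
//
//   TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
//     const TfLiteEvalTensor* input = GetEvalInput(context, node, 0);
//     TfLiteEvalTensor* output = GetEvalOutput(context, node, 0);
//     ...
//     return kTfLiteOk;
//   }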

// Returns data for a TfLiteEvalTensor struct.
template <typename T>
T* GetTensorData(TfLiteEvalTensor* tensor) {
  return tensor != nullptr ? reinterpret_cast<T*>(tensor->data.raw) : nullptr;
}

// Returns const data for a TfLiteEvalTensor struct.
template <typename T>
const T* GetTensorData(const TfLiteEvalTensor* tensor) {
  TFLITE_DCHECK(tensor != nullptr);
  return reinterpret_cast<const T*>(tensor->data.raw);
}
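
// Example (illustrative sketch): typed access to tensor buffers through the
// accessors above. `input`, `output`, and `flat_size` are assumed to come
// from GetEvalInput/GetEvalOutput and GetTensorShape(...).FlatSize().
//
//   const float* in = GetTensorData<float>(input);
//   float* out = GetTensorData<float>(output);
//   for (int i = 0; i < flat_size; ++i) {
//     out[i] = in[i];
//   }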

// Returns the shape of a TfLiteEvalTensor struct.
const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor);

// Returns true if the given tensors have the same shape.
bool HaveSameShapes(const TfLiteEvalTensor* input1,
                    const TfLiteEvalTensor* input2);
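
// Example (illustrative sketch): element-wise kernels commonly check shape
// agreement and derive the element count before looping. FlatSize() is a
// RuntimeShape member from tensorflow/lite/kernels/internal/types.h.
//
//   TFLITE_DCHECK(HaveSameShapes(input1, input2));
//   const int flat_size = GetTensorShape(input1).FlatSize();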

// Converts the TfLitePadding value used in model data to the PaddingType
// enum consumed by the reference kernel implementations.
PaddingType RuntimePaddingType(TfLitePadding padding);

// Relocates the tensor dims from the FlatBuffer to the persistent storage
// arena, copying the old dims data into the new writable storage. tensor and
// eval_tensor must refer to the same underlying tensor. Use only during the
// Prepare phase.
TfLiteStatus CreateWritableTensorDimsWithCopy(TfLiteContext* context,
                                              TfLiteTensor* tensor,
                                              TfLiteEvalTensor* eval_tensor);
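
// Example (illustrative sketch): a Prepare function that must modify output
// dims in place could first make them writable. `output` (a full
// TfLiteTensor obtained during Prepare), kOutputIndex, and new_size are
// hypothetical.
//
//   TF_LITE_ENSURE_STATUS(CreateWritableTensorDimsWithCopy(
//       context, output, GetEvalOutput(context, node, kOutputIndex)));
//   output->dims->data[0] = new_size;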

}  // namespace micro
}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_