
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/kernels/internal/reference/add_n.h"

#include <cstdint>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"

namespace tflite {
namespace {

constexpr int kInputTensor0 = 0;
constexpr int kOutputTensor = 0;
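
// ADD_N sums a variable number (>= 2) of input tensors element-wise into a
// single output; all inputs and the output must share one shape and type.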
TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) {
  int num_inputs = NumInputs(node);
  TF_LITE_ENSURE(context, num_inputs >= 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input_tensor_first;
  TF_LITE_ENSURE_OK(
      context, GetInputSafe(context, node, kInputTensor0, &input_tensor_first));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));

  // Check that all tensors have the same shape and type.
  TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_tensor_first->type);
  for (int i = kInputTensor0 + 1; i < num_inputs; ++i) {
    const TfLiteTensor* input;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
    TF_LITE_ENSURE(context, HaveSameShapes(input_tensor_first, input));
    TF_LITE_ENSURE_TYPES_EQ(context, input_tensor_first->type, input->type);
  }

  // Allocate a scratch buffer to hold a pointer to each input tensor's data,
  // and store the scratch buffer index in the node's user_data.
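  // user_data is a void*, so the int index is stashed in the pointer value
  // itself; this avoids allocating persistent memory just to remember an int.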
  if (output->type == kTfLiteFloat32) {
    int scratch_index;
    size_t scratch_size = sizeof(float*) * num_inputs;
    TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena(
                                   context, scratch_size, &scratch_index));
    node->user_data =
        reinterpret_cast<decltype(node->user_data)>(scratch_index);
  } else {
    TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32, got %s.",
                       TfLiteTypeGetName(output->type));
    return kTfLiteError;
  }

  return kTfLiteOk;
}
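
// Prepare runs once while the interpreter allocates tensors, so shape/type
// validation and the scratch buffer request happen here, not on every Eval.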
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  return CalculateOpData(context, node);
}

template <typename T>
void EvalAddN(TfLiteContext* context, TfLiteNode* node,
              TfLiteEvalTensor* output) {
  int num_inputs = NumInputs(node);
  int scratch_index =
      static_cast<int>(reinterpret_cast<intptr_t>(node->user_data));
  void* scratch_buffer = context->GetScratchBuffer(context, scratch_index);
  const T** all_inputs = static_cast<decltype(all_inputs)>(scratch_buffer);
  for (int i = 0; i < num_inputs; i++) {
    const TfLiteEvalTensor* next_input =
        tflite::micro::GetEvalInput(context, node, kInputTensor0 + i);
    all_inputs[i] = tflite::micro::GetTensorData<T>(next_input);
  }
  reference_ops::AddN<T>(tflite::micro::GetTensorShape(output), num_inputs,
                         all_inputs, tflite::micro::GetTensorData<T>(output));
}
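
// The reference kernel accumulates element-wise: for every flat index j,
//   output[j] = all_inputs[0][j] + all_inputs[1][j] + ... +
//               all_inputs[num_inputs - 1][j]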

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteEvalTensor* output =
      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
  if (output->type == kTfLiteFloat32) {
    EvalAddN<float>(context, node, output);
  } else {
    TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32, got %s.",
                       TfLiteTypeGetName(output->type));
    return kTfLiteError;
  }
  return kTfLiteOk;
}

}  // namespace

TfLiteRegistration Register_ADD_N() {
  return {/*init=*/nullptr,
          /*free=*/nullptr,
          /*prepare=*/Prepare,
          /*invoke=*/Eval,
          /*profiling_string=*/nullptr,
          /*builtin_code=*/0,
          /*custom_name=*/nullptr,
          /*version=*/0};
}

}  // namespace tflite
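
// A minimal registration sketch (hypothetical setup; resolver and interpreter
// signatures vary across TFLM versions):
//
//   static tflite::MicroMutableOpResolver<1> resolver;
//   resolver.AddAddN();  // routes ADD_N nodes to Register_ADD_N()
//   tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
//                                        arena_size, error_reporter);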