/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_

#include <cmath>
#include <cstring>  // memcpy (used by RankOneSelect below)

#include "ruy/profiler/instrumentation.h"  // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace reference_ops {

template <typename D, typename T>
void Select(const RuntimeShape& input_condition_shape,
            const D* input_condition_data, const RuntimeShape& input_x_shape,
            const T* input_x_data, const RuntimeShape& input_y_shape,
            const T* input_y_data, const RuntimeShape& output_shape,
            T* output_data) {
  ruy::profiler::ScopeLabel label("Select");
  int64_t flatsize;
  // Allow the select operator to run on a mix of scalar tensors and
  // one-element tensors.
  if (input_condition_shape.FlatSize() == 1 && input_x_shape.FlatSize() == 1 &&
      input_y_shape.FlatSize() == 1 && output_shape.FlatSize() == 1) {
    flatsize = 1;
  } else {
    flatsize = MatchingFlatSize(input_condition_shape, input_x_shape,
                                input_y_shape, output_shape);
  }
  for (int64_t i = 0; i < flatsize; ++i) {
    output_data[i] =
        input_condition_data[i] ? input_x_data[i] : input_y_data[i];
  }
}
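
// Example (an illustrative sketch, not part of the library API): element-wise
// select over four tensors of matching shape. The shapes and values below are
// hypothetical.
//
//   const RuntimeShape shape({4});
//   const bool cond[] = {true, false, true, false};
//   const float x[] = {1.f, 2.f, 3.f, 4.f};
//   const float y[] = {10.f, 20.f, 30.f, 40.f};
//   float out[4];
//   Select(shape, cond, shape, x, shape, y, shape, out);
//   // out == {1.f, 20.f, 3.f, 40.f}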

template <typename D, typename T>
void RankOneSelect(const RuntimeShape& input_condition_shape,
                   const D* input_condition_data,
                   const RuntimeShape& input_x_shape, const T* input_x_data,
                   const RuntimeShape& input_y_shape, const T* input_y_data,
                   const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("Select/RankOneSelect");
  const int64_t outer_size = input_condition_shape.FlatSize();
  int64_t inner_size;
  if (input_condition_shape.DimensionsCount() == 0) {
    inner_size = MatchingFlatSize(input_x_shape, input_y_shape, output_shape);
  } else {
    TFLITE_DCHECK_EQ(
        MatchingDim(input_x_shape, 0, input_y_shape, 0, output_shape, 0),
        outer_size);
    inner_size =
        MatchingFlatSizeSkipDim(input_x_shape, 0, input_y_shape, output_shape);
  }
  int64_t offset = 0;
  for (int64_t i = 0; i < outer_size; i++) {
    const T* input_data =
        input_condition_data[i] ? input_x_data : input_y_data;
    memcpy(output_data + offset, input_data + offset, inner_size * sizeof(T));
    offset += inner_size;
  }
}
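
// Example (an illustrative sketch, not part of the library API): a rank-1
// condition of length 2 selects whole rows from two 2x3 value tensors. The
// shapes and values below are hypothetical.
//
//   const RuntimeShape cond_shape({2});
//   const RuntimeShape value_shape({2, 3});
//   const bool cond[] = {true, false};
//   const float x[] = {1, 2, 3, 4, 5, 6};
//   const float y[] = {7, 8, 9, 10, 11, 12};
//   float out[6];
//   RankOneSelect(cond_shape, cond, value_shape, x, value_shape, y,
//                 value_shape, out);
//   // out == {1, 2, 3, 10, 11, 12}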

template <typename D, typename T>
void BroadcastSelect5DSlow(const RuntimeShape& input_condition_shape,
                           const D* input_condition_data,
                           const RuntimeShape& input_x_shape,
                           const T* input_x_data,
                           const RuntimeShape& input_y_shape,
                           const T* input_y_data,
                           const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("Select/BroadcastSelectSlow");
  TFLITE_DCHECK_LE(input_condition_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_LE(input_x_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_LE(input_y_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 5);

  NdArrayDesc<5> desc_condition;
  NdArrayDesc<5> desc_x;
  NdArrayDesc<5> desc_y;
  NdArrayDesc<5> desc_output;
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(5, output_shape);
  CopyDimsToDesc(extended_output_shape, &desc_output);
  NdArrayDescsForElementwiseBroadcast(input_condition_shape, input_x_shape,
                                      input_y_shape, &desc_condition, &desc_x,
                                      &desc_y);
  // In TensorFlow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest
  // stride, typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has the smallest stride.
  //
  // We name our variables by their TensorFlow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride, for
  // the best cache behavior.
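  //
  // The output is dense, so its flat index is accumulated in Horner form as
  // the loops descend: out_idx = (((n * E1 + b) * E2 + y) * E3 + x) * E4 + c,
  // where E1..E4 are the trailing output extents. The broadcast inputs use
  // their per-dimension strides instead (a broadcast dimension has stride 0).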
  for (int n = 0; n < desc_output.extents[0]; ++n) {
    int out_idx_n = desc_output.extents[1] * n;
    int cond_idx_n = desc_condition.strides[0] * n;
    int in_idx1_n = desc_x.strides[0] * n;
    int in_idx2_n = desc_y.strides[0] * n;
    for (int b = 0; b < desc_output.extents[1]; ++b) {
      int out_idx_b = (out_idx_n + b) * desc_output.extents[2];
      int cond_idx_b = cond_idx_n + desc_condition.strides[1] * b;
      int in_idx1_b = in_idx1_n + desc_x.strides[1] * b;
      int in_idx2_b = in_idx2_n + desc_y.strides[1] * b;
      for (int y = 0; y < desc_output.extents[2]; ++y) {
        int out_idx_y = (out_idx_b + y) * desc_output.extents[3];
        int cond_idx_y = cond_idx_b + desc_condition.strides[2] * y;
        int in_idx1_y = in_idx1_b + desc_x.strides[2] * y;
        int in_idx2_y = in_idx2_b + desc_y.strides[2] * y;
        for (int x = 0; x < desc_output.extents[3]; ++x) {
          int out_idx = (out_idx_y + x) * desc_output.extents[4];
          int cond_idx = cond_idx_y + desc_condition.strides[3] * x;
          int in_idx1 = in_idx1_y + desc_x.strides[3] * x;
          int in_idx2 = in_idx2_y + desc_y.strides[3] * x;
          for (int c = 0; c < desc_output.extents[4]; ++c) {
            output_data[out_idx] = input_condition_data[cond_idx]
                                       ? input_x_data[in_idx1]
                                       : input_y_data[in_idx2];
            out_idx++;
            cond_idx += desc_condition.strides[4];
            in_idx1 += desc_x.strides[4];
            in_idx2 += desc_y.strides[4];
          }
        }
      }
    }
  }
}
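
// Example (an illustrative sketch, not part of the library API): broadcasting
// a {1, 3} condition against {2, 3} value tensors; all shapes are extended to
// 5-D internally. The shapes and values below are hypothetical.
//
//   const RuntimeShape cond_shape({1, 3});
//   const RuntimeShape value_shape({2, 3});
//   const bool cond[] = {true, false, true};
//   const float x[] = {1, 2, 3, 4, 5, 6};
//   const float y[] = {-1, -2, -3, -4, -5, -6};
//   float out[6];
//   BroadcastSelect5DSlow(cond_shape, cond, value_shape, x, value_shape, y,
//                         value_shape, out);
//   // out == {1, -2, 3, 4, -5, 6}: the condition row is reused for each row
//   // of x and y.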

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_