/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/kernels/kernel_util.h"

#include <stdint.h>
#include <stdlib.h>

#include <algorithm>
#include <complex>
#include <limits>
#include <memory>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"

namespace tflite {

namespace {

// Assumes tensor_index is a valid index (in bounds).
inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context,
                                      int tensor_index) {
  if (context->tensors != nullptr) {
    return &context->tensors[tensor_index];
  } else {
    return context->GetTensor(context, tensor_index);
  }
}
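
// Note (descriptive): GetTensorAtIndex above prefers the flat
// `context->tensors` array and only falls back to the context's `GetTensor`
// callback when that array is not populated for the current context.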

// Validate in a single place to reduce binary size.
inline TfLiteStatus ValidateTensorIndexingSafe(const TfLiteContext* context,
                                               int index, int max_size,
                                               const int* tensor_indices,
                                               int* tensor_index) {
  if (index < 0 || index >= max_size) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Invalid tensor index %d (not in [0, %d))\n", index,
                       max_size);
    return kTfLiteError;
  }
  if (tensor_indices[index] == kTfLiteOptionalTensor) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Tensor at index %d was optional but was expected\n",
                       index);
    return kTfLiteError;
  }

  *tensor_index = tensor_indices[index];
  return kTfLiteOk;
}

// Same as above, but returns -1 for invalid inputs instead of returning a
// status and logging an error.
inline int ValidateTensorIndexing(const TfLiteContext* context, int index,
                                  int max_size, const int* tensor_indices) {
  if (index >= 0 && index < max_size) {
    const int tensor_index = tensor_indices[index];
    if (tensor_index != kTfLiteOptionalTensor) {
      return tensor_index;
    }
  }
  return -1;
}

inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->inputs->size, node->inputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

inline TfLiteStatus GetMutableInputSafe(const TfLiteContext* context,
                                        const TfLiteNode* node, int index,
                                        const TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->inputs->size,
                                          node->inputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

}  // anonymous namespace.

const TfLiteTensor* GetInput(const TfLiteContext* context,
                             const TfLiteNode* node, int index) {
  return GetMutableInput(context, node, index);
}

TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
                          int index, const TfLiteTensor** tensor) {
  return GetMutableInputSafe(context, node, index, tensor);
}

TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  TfLiteTensor* tensor = GetMutableInput(context, node, index);
  if (tensor == nullptr) return nullptr;
  return tensor->is_variable ? tensor : nullptr;
}

TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                        int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->outputs->size, node->outputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node,
                           int index, TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->outputs->size,
                                          node->outputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
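
// Illustrative usage (a minimal sketch of how a kernel might call the *Safe
// accessors above from its Prepare/Eval; the tensor indices 0 are assumptions
// for illustration, not tied to any particular op):
//
//   const TfLiteTensor* input;
//   TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
//   TfLiteTensor* output;
//   TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
//
// The non-Safe variants (GetInput/GetOutput) return nullptr instead of
// propagating a TfLiteStatus when the index is invalid or optional.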

const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
                                           const TfLiteNode* node, int index) {
  return GetInput(context, node, index);
}

#ifndef TF_LITE_STATIC_MEMORY
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
                           int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->temporaries->size, node->temporaries->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
                              const TfLiteNode* node, int index,
                              TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                  context, index, node->temporaries->size,
                                  node->temporaries->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

const TfLiteTensor* GetIntermediates(TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->intermediates->size, node->intermediates->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
                                  const TfLiteNode* node, int index,
                                  TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                  context, index, node->intermediates->size,
                                  node->intermediates->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
#endif  // TF_LITE_STATIC_MEMORY

// Per-axis
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int* per_channel_shift) {
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  return PopulateConvolutionQuantizationParams(
      context, input, filter, bias, output, activation, multiplier, shift,
      output_activation_min, output_activation_max, per_channel_multiplier,
      per_channel_shift, affine_quantization->scale->size);
}

// Per-axis & per-tensor
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int* per_channel_shift,
    int num_channels) {
  TF_LITE_ENSURE_EQ(context, input->quantization.type,
                    kTfLiteAffineQuantization);
  TF_LITE_ENSURE_EQ(context, filter->quantization.type,
                    kTfLiteAffineQuantization);
  // TODO(jianlijianli): Enable bias type check and bias scale == input scale
  // * filter scale for each channel in affine quantization once bias
  // quantization is properly populated.
  // TF_LITE_ENSURE_EQ(context, bias->quantization.type,
  //                   kTfLiteAffineQuantization);

  // Check data type.
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  TF_LITE_ENSURE(context, affine_quantization);
  TF_LITE_ENSURE(context, affine_quantization->scale);
  const bool is_per_channel = affine_quantization->scale->size > 1;
  if (is_per_channel) {
    // Currently only Int8/Int16 is supported for per channel quantization.
    TF_LITE_ENSURE(context,
                   input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
    TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8);
    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels);
    TF_LITE_ENSURE_EQ(
        context, num_channels,
        filter->dims->data[affine_quantization->quantized_dimension]);
  }

  // Populate multiplier and shift using affine quantization.
  const float input_scale = input->params.scale;
  const float output_scale = output->params.scale;
  const float* filter_scales = affine_quantization->scale->data;
  for (int i = 0; i < num_channels; ++i) {
    // If a per-tensor quantization parameter is specified, broadcast it along
    // the quantization dimension (channels_out).
    const float scale = is_per_channel ? filter_scales[i] : filter_scales[0];
    const double filter_scale = static_cast<double>(scale);
    const double effective_output_scale = static_cast<double>(input_scale) *
                                          filter_scale /
                                          static_cast<double>(output_scale);
    int32_t significand;
    int channel_shift;
    QuantizeMultiplier(effective_output_scale, &significand, &channel_shift);
    per_channel_multiplier[i] = significand;
    per_channel_shift[i] = channel_shift;
  }

  // Populate scalar quantization parameters.
  // This check on legacy quantization parameters is kept only for backward
  // compatibility.
  if (input->type == kTfLiteUInt8) {
    // Check bias scale == input scale * filter scale.
    double real_multiplier = 0.0;
    TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
        context, input, filter, bias, output, &real_multiplier));

    int exponent;
    // Populate quantization parameters with multiplier and shift.
    QuantizeMultiplier(real_multiplier, multiplier, &exponent);
    *shift = -exponent;
  }
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
      input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, activation, output, output_activation_min,
        output_activation_max));
  }
  return kTfLiteOk;
}
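
// Worked example (illustrative numbers only): with input_scale = 0.5,
// filter_scale = 0.25 and output_scale = 0.25, the effective_output_scale in
// the loop above is 0.5 * 0.25 / 0.25 = 0.5. QuantizeMultiplier then
// represents 0.5 as a fixed-point significand of 2^30 (i.e. 0.5 * 2^31) with
// a shift of 0, so per_channel_multiplier[i] = 1073741824 and
// per_channel_shift[i] = 0.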

TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              const TfLiteTensor* bias,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  const double input_product_scale = static_cast<double>(input->params.scale) *
                                     static_cast<double>(filter->params.scale);
  // TODO(ahentz): The following conditions must be guaranteed by the training
  // pipeline.
  if (bias) {
    const double bias_scale = static_cast<double>(bias->params.scale);
    // Here we're making sure that input_product_scale and bias_scale are about
    // the same. Since we have:
    //   (output - output_zp) * output_scale =
    //       input_product_scale * input_product + bias * bias_scale   ---- (0)
    //
    // (0) equals:
    //       (input_product + bias) * input_product_scale              ---- (1)
    //     + bias * (bias_scale - input_product_scale)                 ---- (2)
    //
    // The real kernel computes (1), so we need (2) to have a negligible impact
    // on the output. That is,
    //   bias * (bias_scale - input_product_scale) / output_scale
    // should be much smaller than one output quantization step. Since bias is
    // normally within a small range, it suffices to require that
    //   (bias_scale - input_product_scale) / output_scale
    // be a small number, e.g. 0.02.
    const double scale_diff = std::abs(input_product_scale - bias_scale);
    const double output_scale = static_cast<double>(output->params.scale);

    TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02);
  }
  return GetQuantizedConvolutionMultipler(context, input, filter, output,
                                          multiplier);
}
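
// Numeric illustration (assumed values, not from any real model): with
// input_product_scale = 0.0100, bias_scale = 0.0102 and output_scale = 0.05,
// scale_diff / output_scale = 0.0002 / 0.05 = 0.004, which passes the 0.02
// tolerance checked above.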

TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  const double input_product_scale =
      static_cast<double>(input->params.scale * filter->params.scale);
  TF_LITE_ENSURE(context, input_product_scale >= 0);
  *multiplier =
      input_product_scale / static_cast<double>(output->params.scale);
  return kTfLiteOk;
}

namespace {

void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation,
                                           int32_t qmin, int32_t qmax,
                                           TfLiteTensor* output,
                                           int32_t* act_min,
                                           int32_t* act_max) {
  const auto scale = output->params.scale;
  const auto zero_point = output->params.zero_point;

  auto quantize = [scale, zero_point](float f) {
    return zero_point + static_cast<int32_t>(TfLiteRound(f / scale));
  };

  if (activation == kTfLiteActRelu) {
    *act_min = std::max(qmin, quantize(0.0));
    *act_max = qmax;
  } else if (activation == kTfLiteActRelu6) {
    *act_min = std::max(qmin, quantize(0.0));
    *act_max = std::min(qmax, quantize(6.0));
  } else if (activation == kTfLiteActReluN1To1) {
    *act_min = std::max(qmin, quantize(-1.0));
    *act_max = std::min(qmax, quantize(1.0));
  } else {
    *act_min = qmin;
    *act_max = qmax;
  }
}

}  // namespace

TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max) {
  int32_t qmin = 0;
  int32_t qmax = 0;
  if (output->type == kTfLiteUInt8) {
    qmin = std::numeric_limits<uint8_t>::min();
    qmax = std::numeric_limits<uint8_t>::max();
  } else if (output->type == kTfLiteInt8) {
    qmin = std::numeric_limits<int8_t>::min();
    qmax = std::numeric_limits<int8_t>::max();
  } else if (output->type == kTfLiteInt16) {
    qmin = std::numeric_limits<int16_t>::min();
    qmax = std::numeric_limits<int16_t>::max();
  } else {
    TF_LITE_ENSURE(context, false);
  }

  CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min,
                                        act_max);
  return kTfLiteOk;
}
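
// Example (assumed output quantization, not from any real model): for a uint8
// output with scale = 0.05 and zero_point = 0, kTfLiteActRelu6 yields
// act_min = max(0, quantize(0.0)) = 0 and
// act_max = min(255, quantize(6.0)) = min(255, 120) = 120.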

bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
  return TfLiteIntArrayEqual(input1->dims, input2->dims);
}

// TODO(petewarden): Having macros around this is ugly, look at other
// strategies before replicating this approach elsewhere.
#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape) {
  int dims1 = NumDimensions(input1);
  int dims2 = NumDimensions(input2);
  int out_dims = std::max(dims1, dims2);
  if (NumElements(input1) == 0) {
    *output_shape = TfLiteIntArrayCopy(input1->dims);
    return kTfLiteOk;
  }
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
  for (int i = 0; i < out_dims; ++i) {
    int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    TF_LITE_ENSURE(context, d1 == d2 || d1 == 1 || d2 == 1);
    shape->data[out_dims - i - 1] = std::max(d1, d2);
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}
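
// Example (hypothetical shapes): broadcasting input1 of shape [3, 1, 5] with
// input2 of shape [4, 1] produces an output shape of [3, 4, 5]; dimensions are
// aligned from the right and a size-1 dimension stretches to match the other
// operand. The three-input overload below applies the same rule pairwise.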

TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        const TfLiteTensor* input3,
                                        TfLiteIntArray** output_shape) {
  int dims1 = NumDimensions(input1);
  int dims2 = NumDimensions(input2);
  int dims3 = NumDimensions(input3);
  int out_dims = std::max(std::max(dims1, dims2), dims3);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
  for (int i = 0; i < out_dims; ++i) {
    int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1);
    int max_value = std::max(std::max(d1, d2), d3);
    TF_LITE_ENSURE(context, d1 == 1 || d1 == max_value);
    TF_LITE_ENSURE(context, d2 == 1 || d2 == max_value);
    TF_LITE_ENSURE(context, d3 == 1 || d3 == max_value);
    shape->data[out_dims - i - 1] = max_value;
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}
#endif  // TF_LITE_STATIC_MEMORY

// The size of the string type is not constant, so return 0 in that case.
int TfLiteTypeGetSize(TfLiteType type) {
  switch (type) {
    case kTfLiteUInt8:
      TF_LITE_ASSERT_EQ(sizeof(uint8_t), 1);
      return 1;
    case kTfLiteInt8:
      TF_LITE_ASSERT_EQ(sizeof(int8_t), 1);
      return 1;
    case kTfLiteBool:
      return sizeof(bool);
    case kTfLiteInt16:
      TF_LITE_ASSERT_EQ(sizeof(int16_t), 2);
      return 2;
    case kTfLiteFloat16:
      TF_LITE_ASSERT_EQ(sizeof(int16_t), 2);
      return 2;
    case kTfLiteFloat32:
      TF_LITE_ASSERT_EQ(sizeof(float), 4);
      return 4;
    case kTfLiteInt32:
      TF_LITE_ASSERT_EQ(sizeof(int32_t), 4);
      return 4;
    case kTfLiteInt64:
      TF_LITE_ASSERT_EQ(sizeof(int64_t), 8);
      return 8;
    case kTfLiteFloat64:
      TF_LITE_ASSERT_EQ(sizeof(double), 8);
      return 8;
    case kTfLiteComplex64:
      TF_LITE_ASSERT_EQ(sizeof(std::complex<float>), 8);
      return 8;
    case kTfLiteComplex128:
      TF_LITE_ASSERT_EQ(sizeof(std::complex<double>), 16);
      return 16;
    default:
      return 0;
  }
}
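
// Usage note (a sketch, not part of the public contract): for fixed-size types
// the byte size of a tensor can be derived as
//   TfLiteTypeGetSize(tensor->type) * NumElements(tensor)
// whereas for string tensors the function returns 0 and the actual storage
// size must be read from the tensor's string buffer instead.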

}  // namespace tflite