/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/kernel_util.h"

#include <stdint.h>
#include <stdlib.h>

#include <algorithm>
#include <complex>
#include <limits>
#include <memory>
#ifndef TF_LITE_STATIC_MEMORY
#include <string>
#endif  // TF_LITE_STATIC_MEMORY

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"

#if defined(__APPLE__)
#include "TargetConditionals.h"
#endif

namespace tflite {
namespace {

// Assumes tensor_index is a valid index (in bounds).
inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context,
                                      int tensor_index) {
  if (context->tensors != nullptr) {
    return &context->tensors[tensor_index];
  } else {
    return context->GetTensor(context, tensor_index);
  }
}

// Validate in a single place to reduce binary size.
inline TfLiteStatus ValidateTensorIndexingSafe(const TfLiteContext* context,
                                               int index, int max_size,
                                               const int* tensor_indices,
                                               int* tensor_index) {
  if (index < 0 || index >= max_size) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Invalid tensor index %d (not in [0, %d))\n", index,
                       max_size);
    return kTfLiteError;
  }
  if (tensor_indices[index] == kTfLiteOptionalTensor) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Tensor at index %d was optional but was expected\n",
                       index);
    return kTfLiteError;
  }
  *tensor_index = tensor_indices[index];
  return kTfLiteOk;
}

// Same as above but returns -1 for invalid inputs instead of status +
// logging error.
inline int ValidateTensorIndexing(const TfLiteContext* context, int index,
                                  int max_size, const int* tensor_indices) {
  if (index >= 0 && index < max_size) {
    const int tensor_index = tensor_indices[index];
    if (tensor_index != kTfLiteOptionalTensor) {
      return tensor_index;
    }
  }
  return -1;
}

inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->inputs->size, node->inputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

inline TfLiteStatus GetMutableInputSafe(const TfLiteContext* context,
                                        const TfLiteNode* node, int index,
                                        const TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->inputs->size,
                                          node->inputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

}  // anonymous namespace.
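
// Typical usage inside an op kernel's Prepare()/Eval(); the tensor indices
// below are purely illustrative, not part of this file:
//   const TfLiteTensor* input;
//   TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, /*index=*/0, &input));
//   TfLiteTensor* output;
//   TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, /*index=*/0, &output));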
const TfLiteTensor* GetInput(const TfLiteContext* context,
                             const TfLiteNode* node, int index) {
  return GetMutableInput(context, node, index);
}

TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
                          int index, const TfLiteTensor** tensor) {
  return GetMutableInputSafe(context, node, index, tensor);
}
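
// Returns the input tensor at `index` only if that tensor is marked
// is_variable; otherwise (including missing/optional inputs) returns nullptr.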
TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  TfLiteTensor* tensor = GetMutableInput(context, node, index);
  if (tensor == nullptr) return nullptr;
  return tensor->is_variable ? tensor : nullptr;
}

TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                        int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->outputs->size, node->outputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node,
                           int index, TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->outputs->size,
                                          node->outputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
                                           const TfLiteNode* node, int index) {
  return GetInput(context, node, index);
}

#ifndef TF_LITE_STATIC_MEMORY
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
                           int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->temporaries->size, node->temporaries->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
                              const TfLiteNode* node, int index,
                              TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->temporaries->size,
                                 node->temporaries->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

const TfLiteTensor* GetIntermediates(TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->intermediates->size, node->intermediates->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
                                  const TfLiteNode* node, int index,
                                  TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->intermediates->size,
                                 node->intermediates->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
#endif  // TF_LITE_STATIC_MEMORY
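
// The (multiplier, shift) pairs computed below are a fixed-point encoding of
//   effective_scale = input_scale * filter_scale[c] / output_scale:
// QuantizeMultiplier() returns a normalized 32-bit fractional multiplier and
// a power-of-two exponent such that effective_scale is approximately the
// fractional multiplier scaled by 2^shift.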
// Per-axis
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift) {
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  return PopulateConvolutionQuantizationParams(
      context, input, filter, bias, output, activation, multiplier, shift,
      output_activation_min, output_activation_max, per_channel_multiplier,
      per_channel_shift, affine_quantization->scale->size);
}

// Per-axis & per-tensor
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift,
    int num_channels) {
  TF_LITE_ENSURE_EQ(context, input->quantization.type,
                    kTfLiteAffineQuantization);
  TF_LITE_ENSURE_EQ(context, filter->quantization.type,
                    kTfLiteAffineQuantization);
  // TODO(jianlijianli): Enable bias type check and bias scale == input scale
  // * filter scale for each channel in affine quantization once bias
  // quantization is properly populated.
  // TF_LITE_ENSURE_EQ(context, bias->quantization.type,
  //                   kTfLiteAffineQuantization);

  // Check data type.
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  TF_LITE_ENSURE(context, affine_quantization);
  TF_LITE_ENSURE(context, affine_quantization->scale);
  const bool is_per_channel = affine_quantization->scale->size > 1;
  if (is_per_channel) {
    // Currently only Int8/Int16 is supported for per channel quantization.
    TF_LITE_ENSURE(context,
                   input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
    TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8);
    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels);
    TF_LITE_ENSURE_EQ(
        context, num_channels,
        filter->dims->data[affine_quantization->quantized_dimension]);
  }

  // Populate multiplier and shift using affine quantization.
  const float input_scale = input->params.scale;
  const float output_scale = output->params.scale;
  const float* filter_scales = affine_quantization->scale->data;
  for (int i = 0; i < num_channels; ++i) {
    // If per-tensor quantization parameter is specified, broadcast it along
    // the quantization dimension (channels_out).
    const float scale = is_per_channel ? filter_scales[i] : filter_scales[0];
    const double filter_scale = static_cast<double>(scale);
    const double effective_output_scale = static_cast<double>(input_scale) *
                                          filter_scale /
                                          static_cast<double>(output_scale);
    int32_t significand;
    int channel_shift;
    QuantizeMultiplier(effective_output_scale, &significand, &channel_shift);
    per_channel_multiplier[i] = significand;
    per_channel_shift[i] = channel_shift;
  }

  // Populate scalar quantization parameters.
  // This check on legacy quantization parameters is kept only for backward
  // compatibility.
  if (input->type == kTfLiteUInt8) {
    // Check bias scale == input scale * filter scale.
    double real_multiplier = 0.0;
    TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
        context, input, filter, bias, output, &real_multiplier));

    int exponent;
    // Populate quantization parameters with multiplier and shift.
    QuantizeMultiplier(real_multiplier, multiplier, &exponent);
    *shift = -exponent;
  }
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
      input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, activation, output, output_activation_min,
        output_activation_max));
  }
  return kTfLiteOk;
}

TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              const TfLiteTensor* bias,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  const double input_product_scale = static_cast<double>(input->params.scale) *
                                     static_cast<double>(filter->params.scale);
  // The following conditions must be guaranteed by the training pipeline.
  if (bias) {
    const double bias_scale = static_cast<double>(bias->params.scale);
    // Here we're making sure input_product_scale and bias_scale are about the
    // same. Since we have:
    // (output - output_zp) * output_scale =
    //     input_product_scale * input_product + bias * bias_scale ---- (0)
    //
    // (0) equals:
    //     (input_product + bias) * input_product_scale ----- (1)
    //            +
    //     bias * (bias_scale - input_product_scale)   ------ (2)
    //
    // The real kernel computation performs (1), so we need (2) to have a
    // minimal impact on the output, i.e.
    // bias * (bias_scale - input_product_scale) / output_scale should be a
    // small number. Since bias values are normally within a small range, we
    // expect (bias_scale - input_product_scale) / output_scale to be a small
    // number like 0.02.
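    // Purely illustrative numbers (not from any particular model): with
    // input_scale = 0.5 and filter_scale = 0.01 the product scale is 0.005;
    // a bias_scale of 0.00501 and an output_scale of 0.1 give
    // |0.005 - 0.00501| / 0.1 = 1e-4, comfortably below the 0.02 bound
    // checked below.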
    const double scale_diff = std::abs(input_product_scale - bias_scale);
    const double output_scale = static_cast<double>(output->params.scale);
    TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02);
  }
  return GetQuantizedConvolutionMultipler(context, input, filter, output,
                                          multiplier);
}

TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  const double input_product_scale =
      static_cast<double>(input->params.scale * filter->params.scale);
  TF_LITE_ENSURE(context, input_product_scale >= 0);
  *multiplier = input_product_scale / static_cast<double>(output->params.scale);
  return kTfLiteOk;
}

namespace {
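
// Quantizes a single float value as q = zero_point + round(f / scale),
// returning an error if the rounded value would not fit in an int32.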
inline TfLiteStatus Quantize(TfLiteContext* context, float scale,
                             int32_t zero_point, float f, int32_t& q) {
  const float tmp = TfLiteRound(f / scale);
  const bool no_integer_overflow_from_quantization =
      (tmp >= static_cast<float>(std::numeric_limits<int32_t>::min()) &&
       tmp <= static_cast<float>(std::numeric_limits<int32_t>::max()));
  TF_LITE_ENSURE(context, no_integer_overflow_from_quantization);
  q = zero_point + static_cast<int32_t>(tmp);
  return kTfLiteOk;
}
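
// Worked example (illustrative values): for an int8 output with scale = 0.1
// and zero_point = 0, kTfLiteActRelu6 clamps the activation range to
// [max(-128, Quantize(0.0)), min(127, Quantize(6.0))] = [0, 60].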
TfLiteStatus CalculateActivationRangeQuantizedImpl(
    TfLiteContext* context, TfLiteFusedActivation activation, int32_t qmin,
    int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) {
  const auto scale = output->params.scale;
  const auto zero_point = output->params.zero_point;

  int32_t tmp_q;
  if (activation == kTfLiteActRelu) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 0.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    *act_max = qmax;
  } else if (activation == kTfLiteActRelu6) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 0.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 6.0, tmp_q));
    *act_max = std::min(qmax, tmp_q);
  } else if (activation == kTfLiteActReluN1To1) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, -1.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 1.0, tmp_q));
    *act_max = std::min(qmax, tmp_q);
  } else {
    *act_min = qmin;
    *act_max = qmax;
  }
  return kTfLiteOk;
}
}  // namespace

TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max) {
  int32_t qmin = 0;
  int32_t qmax = 0;
  if (output->type == kTfLiteUInt8) {
    qmin = std::numeric_limits<uint8_t>::min();
    qmax = std::numeric_limits<uint8_t>::max();
  } else if (output->type == kTfLiteInt8) {
    qmin = std::numeric_limits<int8_t>::min();
    qmax = std::numeric_limits<int8_t>::max();
  } else if (output->type == kTfLiteInt16) {
    qmin = std::numeric_limits<int16_t>::min();
    qmax = std::numeric_limits<int16_t>::max();
  } else {
    TF_LITE_ENSURE(context, false);
  }

  return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax,
                                               output, act_min, act_max);
}

bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
  return TfLiteIntArrayEqual(input1->dims, input2->dims);
}

#ifndef TF_LITE_STATIC_MEMORY
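// Converts a 1-D int32 shape tensor into a newly allocated TfLiteIntArray;
// the caller takes ownership of the returned array.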
TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context,
                                     const TfLiteTensor* input,
                                     TfLiteIntArray** output_shape) {
  if (NumDimensions(input) != 1) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Invalid %dD input tensor (must be a 1D tensor).",
                       NumDimensions(input));
    return kTfLiteError;
  }
  const int output_dims = SizeOfDimension(input, 0);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(output_dims), TfLiteIntArrayFree);
  for (int i = 0; i < output_dims; i++) {
    shape->data[i] = input->data.i32[i];
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}

// TODO(b/172067338): Having this function be part of TF_LITE_STATIC_MEMORY
// build results in a 6KB size increase, even though the function is unused for
// that build. What appears to be happening is that while the linker drops the
// unused function, the string library that gets pulled in is not dropped,
// resulting in the increased binary size.
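
// For example, a shape {2, 3, 4} is rendered as "[2,3,4]" and a rank-0
// (scalar) shape as "[]".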
const std::string GetShapeDebugString(const TfLiteIntArray* shape) {
  std::string str;
  for (int d = 0; d < shape->size; ++d) {
    if (str.empty())
      str = "[" + std::to_string(shape->data[d]);
    else
      // Don't add space after "," to make the output consistent with
      // tensorflow::shape_inference::InferenceContext::DebugString()
      str += "," + std::to_string(shape->data[d]);
  }
  if (str.empty()) {
    str = "[]";
  } else {
    str += "]";
  }
  return str;
}
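
// Standard right-aligned broadcasting: trailing dimensions are compared one
// by one, and a dimension of 1 (or a missing leading dimension) is stretched
// to match the other operand. Illustrative shapes: [2, 3, 1] and [3, 4]
// broadcast to [2, 3, 4], while [2, 3] and [4, 3] are rejected.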
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape) {
  const int dims1 = NumDimensions(input1);
  const int dims2 = NumDimensions(input2);
  const int out_dims = std::max(dims1, dims2);

  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
  for (int i = 0; i < out_dims; ++i) {
    const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    if (!(d1 == d2 || d1 == 1 || d2 == 1)) {
      TF_LITE_KERNEL_LOG(context,
                         "Given shapes, %s and %s, are not broadcastable.",
                         GetShapeDebugString(input1->dims).c_str(),
                         GetShapeDebugString(input2->dims).c_str());
      return kTfLiteError;
    }

    if (d1 == 0 || d2 == 0) {
      shape->data[out_dims - i - 1] = 0;
    } else {
      shape->data[out_dims - i - 1] = std::max(d1, d2);
    }
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}

TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        const TfLiteTensor* input3,
                                        TfLiteIntArray** output_shape) {
  const int dims1 = NumDimensions(input1);
  const int dims2 = NumDimensions(input2);
  const int dims3 = NumDimensions(input3);
  const int out_dims = std::max(std::max(dims1, dims2), dims3);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
      TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
  for (int i = 0; i < out_dims; ++i) {
    const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1);
    const int min_value = std::min(std::min(d1, d2), d3);
    int max_value = std::max(std::max(d1, d2), d3);
    // If one dimension is 0, the others must be 0 or 1.
    if (min_value == 0) max_value = 0;
    if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) ||
        !(d3 == 1 || d3 == max_value)) {
      TF_LITE_KERNEL_LOG(context,
                         "Given shapes, %s, %s and %s, are not broadcastable.",
                         GetShapeDebugString(input1->dims).c_str(),
                         GetShapeDebugString(input2->dims).c_str(),
                         GetShapeDebugString(input3->dims).c_str());
      return kTfLiteError;
    }
    shape->data[out_dims - i - 1] = max_value;
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}
#endif  // TF_LITE_STATIC_MEMORY

// Returns the size of the given TfLiteType in bytes. Strings have no fixed
// per-element size, so 0 is returned for them (and for any unhandled type).
int TfLiteTypeGetSize(TfLiteType type) {
  switch (type) {
    case kTfLiteUInt8:
      static_assert(sizeof(uint8_t) == 1, "");
      return 1;
    case kTfLiteInt8:
      static_assert(sizeof(int8_t) == 1, "");
      return 1;
    case kTfLiteBool:
      return sizeof(bool);
    case kTfLiteUInt16:
      static_assert(sizeof(uint16_t) == 2, "");
      return 2;
    case kTfLiteInt16:
      static_assert(sizeof(int16_t) == 2, "");
      return 2;
    case kTfLiteFloat16:
      static_assert(sizeof(int16_t) == 2, "");
      return 2;
    case kTfLiteFloat32:
      static_assert(sizeof(float) == 4, "");
      return 4;
    case kTfLiteInt32:
      static_assert(sizeof(int32_t) == 4, "");
      return 4;
    case kTfLiteUInt32:
      static_assert(sizeof(uint32_t) == 4, "");
      return 4;
    case kTfLiteInt64:
      static_assert(sizeof(int64_t) == 8, "");
      return 8;
    case kTfLiteUInt64:
      static_assert(sizeof(uint64_t) == 8, "");
      return 8;
    case kTfLiteFloat64:
      static_assert(sizeof(double) == 8, "");
      return 8;
    case kTfLiteComplex64:
      static_assert(sizeof(std::complex<float>) == 8, "");
      return 8;
    case kTfLiteComplex128:
      static_assert(sizeof(std::complex<double>) == 16, "");
      return 16;
    default:
      return 0;
  }
}

bool IsMobilePlatform() {
#if defined(ANDROID) || defined(__ANDROID__)
  return true;
#elif defined(__APPLE__)
#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
  return true;
#endif
#endif
  return false;
}
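
// A dimension recorded as -1 in dims_signature marks an unspecified (dynamic)
// size, so any such entry means the tensor's full shape is not known yet.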
bool HasUnspecifiedDimension(const TfLiteTensor* tensor) {
#ifndef TF_LITE_STATIC_MEMORY
  if (tensor->dims_signature) {
    for (int i : TfLiteIntArrayView(tensor->dims_signature)) {
      if (i == -1) return true;
    }
  }
#endif  // TF_LITE_STATIC_MEMORY
  return false;
}

}  // namespace tflite