// CTfLiteClass.cpp
  1. #include "CTfLiteClass.h"
  2. #include "ClassLogFile.h"
  3. #include "Helper.h"
  4. #include "psram.h"
  5. #include "esp_log.h"
  6. #include "../../include/defines.h"
  7. #include <sys/stat.h>
  8. // #define DEBUG_DETAIL_ON
  9. static const char *TAG = "TFLITE";
// Register the set of TFLite Micro operations the models require with the
// static op resolver. Only these ops are linked in, which keeps the
// firmware image small; a model using any other op will fail to allocate.
void CTfLiteClass::MakeStaticResolver()
{
    resolver.AddFullyConnected();
    resolver.AddReshape();
    resolver.AddSoftmax();
    resolver.AddConv2D();
    resolver.AddMaxPool2D();
    resolver.AddQuantize();
    resolver.AddMul();
    resolver.AddAdd();
    resolver.AddLeakyRelu();
    resolver.AddDequantize();
}
  23. float CTfLiteClass::GetOutputValue(int nr)
  24. {
  25. TfLiteTensor* output2 = this->interpreter->output(0);
  26. int numeroutput = output2->dims->data[1];
  27. if ((nr+1) > numeroutput)
  28. return -1000;
  29. return output2->data.f[nr];
  30. }
  31. int CTfLiteClass::GetClassFromImageBasis(CImageBasis *rs)
  32. {
  33. if (!LoadInputImageBasis(rs))
  34. return -1000;
  35. Invoke();
  36. return GetOutClassification();
  37. }
  38. int CTfLiteClass::GetOutClassification(int _von, int _bis)
  39. {
  40. TfLiteTensor* output2 = interpreter->output(0);
  41. float zw_max;
  42. float zw;
  43. int zw_class;
  44. if (output2 == NULL)
  45. return -1;
  46. int numeroutput = output2->dims->data[1];
  47. //ESP_LOGD(TAG, "number output neurons: %d", numeroutput);
  48. if (_bis == -1)
  49. _bis = numeroutput -1;
  50. if (_von == -1)
  51. _von = 0;
  52. if (_bis >= numeroutput)
  53. {
  54. ESP_LOGD(TAG, "NUMBER OF OUTPUT NEURONS does not match required classification!");
  55. return -1;
  56. }
  57. zw_max = output2->data.f[_von];
  58. zw_class = _von;
  59. for (int i = _von + 1; i <= _bis; ++i)
  60. {
  61. zw = output2->data.f[i];
  62. if (zw > zw_max)
  63. {
  64. zw_max = zw;
  65. zw_class = i;
  66. }
  67. }
  68. return (zw_class - _von);
  69. }
  70. void CTfLiteClass::GetInputDimension(bool silent = false)
  71. {
  72. TfLiteTensor* input2 = this->interpreter->input(0);
  73. int numdim = input2->dims->size;
  74. if (!silent) ESP_LOGD(TAG, "NumDimension: %d", numdim);
  75. int sizeofdim;
  76. for (int j = 0; j < numdim; ++j)
  77. {
  78. sizeofdim = input2->dims->data[j];
  79. if (!silent) ESP_LOGD(TAG, "SizeOfDimension %d: %d", j, sizeofdim);
  80. if (j == 1) im_height = sizeofdim;
  81. if (j == 2) im_width = sizeofdim;
  82. if (j == 3) im_channel = sizeofdim;
  83. }
  84. }
  85. int CTfLiteClass::ReadInputDimenstion(int _dim)
  86. {
  87. if (_dim == 0)
  88. return im_width;
  89. if (_dim == 1)
  90. return im_height;
  91. if (_dim == 2)
  92. return im_channel;
  93. return -1;
  94. }
  95. int CTfLiteClass::GetAnzOutPut(bool silent)
  96. {
  97. TfLiteTensor* output2 = this->interpreter->output(0);
  98. int numdim = output2->dims->size;
  99. if (!silent) ESP_LOGD(TAG, "NumDimension: %d", numdim);
  100. int sizeofdim;
  101. for (int j = 0; j < numdim; ++j)
  102. {
  103. sizeofdim = output2->dims->data[j];
  104. if (!silent) ESP_LOGD(TAG, "SizeOfDimension %d: %d", j, sizeofdim);
  105. }
  106. float fo;
  107. // Process the inference results.
  108. int numeroutput = output2->dims->data[1];
  109. for (int i = 0; i < numeroutput; ++i)
  110. {
  111. fo = output2->data.f[i];
  112. if (!silent) ESP_LOGD(TAG, "Result %d: %f", i, fo);
  113. }
  114. return numeroutput;
  115. }
  116. void CTfLiteClass::Invoke()
  117. {
  118. if (interpreter != nullptr)
  119. interpreter->Invoke();
  120. }
  121. bool CTfLiteClass::LoadInputImageBasis(CImageBasis *rs)
  122. {
  123. #ifdef DEBUG_DETAIL_ON
  124. LogFile.WriteHeapInfo("CTfLiteClass::LoadInputImageBasis - Start");
  125. #endif
  126. unsigned int w = rs->width;
  127. unsigned int h = rs->height;
  128. unsigned char red, green, blue;
  129. // ESP_LOGD(TAG, "Image: %s size: %d x %d\n", _fn.c_str(), w, h);
  130. input_i = 0;
  131. float* input_data_ptr = (interpreter->input(0))->data.f;
  132. for (int y = 0; y < h; ++y)
  133. for (int x = 0; x < w; ++x)
  134. {
  135. red = rs->GetPixelColor(x, y, 0);
  136. green = rs->GetPixelColor(x, y, 1);
  137. blue = rs->GetPixelColor(x, y, 2);
  138. *(input_data_ptr) = (float) red;
  139. input_data_ptr++;
  140. *(input_data_ptr) = (float) green;
  141. input_data_ptr++;
  142. *(input_data_ptr) = (float) blue;
  143. input_data_ptr++;
  144. }
  145. #ifdef DEBUG_DETAIL_ON
  146. LogFile.WriteHeapInfo("CTfLiteClass::LoadInputImageBasis - done");
  147. #endif
  148. return true;
  149. }
  150. bool CTfLiteClass::MakeAllocate()
  151. {
  152. MakeStaticResolver();
  153. #ifdef DEBUG_DETAIL_ON
  154. LogFile.WriteHeapInfo("CTLiteClass::Alloc start");
  155. #endif
  156. LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "CTfLiteClass::MakeAllocate");
  157. this->interpreter = new tflite::MicroInterpreter(this->model, resolver, this->tensor_arena, this->kTensorArenaSize);
  158. if (this->interpreter)
  159. {
  160. TfLiteStatus allocate_status = this->interpreter->AllocateTensors();
  161. if (allocate_status != kTfLiteOk) {
  162. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "AllocateTensors() failed");
  163. this->GetInputDimension();
  164. return false;
  165. }
  166. }
  167. else
  168. {
  169. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "new tflite::MicroInterpreter failed");
  170. LogFile.WriteHeapInfo("CTfLiteClass::MakeAllocate-new tflite::MicroInterpreter failed");
  171. return false;
  172. }
  173. #ifdef DEBUG_DETAIL_ON
  174. LogFile.WriteHeapInfo("CTLiteClass::Alloc done");
  175. #endif
  176. return true;
  177. }
// Debug-only helper intended to report the input tensor size.
// NOTE(review): sizeof(zw) yields the size of a float* (4/8 bytes), not
// the tensor length — the logged number is misleading. Left byte-identical
// because the body is compiled out unless DEBUG_DETAIL_ON is defined.
void CTfLiteClass::GetInputTensorSize()
{
#ifdef DEBUG_DETAIL_ON
    float *zw = this->input;
    int test = sizeof(zw); // sizeof the pointer, not the pointed-to data
    ESP_LOGD(TAG, "Input Tensor Dimension: %d", test);
#endif
}
  186. long CTfLiteClass::GetFileSize(std::string filename)
  187. {
  188. struct stat stat_buf;
  189. long rc = stat(filename.c_str(), &stat_buf);
  190. return rc == 0 ? stat_buf.st_size : -1;
  191. }
  192. bool CTfLiteClass::ReadFileToModel(std::string _fn)
  193. {
  194. LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "CTfLiteClass::ReadFileToModel: " + _fn);
  195. long size = GetFileSize(_fn);
  196. if (size == -1)
  197. {
  198. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Model file doesn't exist: " + _fn + "!");
  199. return false;
  200. }
  201. else if(size > MAX_MODEL_SIZE) {
  202. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Unable to load model '" + _fn + "'! It does not fit in the reserved shared memory in PSRAM!");
  203. return false;
  204. }
  205. LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Loading Model " + _fn + " /size: " + std::to_string(size) + " bytes...");
  206. #ifdef DEBUG_DETAIL_ON
  207. LogFile.WriteHeapInfo("CTLiteClass::Alloc modelfile start");
  208. #endif
  209. modelfile = (unsigned char*)psram_get_shared_model_memory();
  210. if(modelfile != NULL)
  211. {
  212. FILE* f = fopen(_fn.c_str(), "rb"); // previously only "r
  213. fread(modelfile, 1, size, f);
  214. fclose(f);
  215. #ifdef DEBUG_DETAIL_ON
  216. LogFile.WriteHeapInfo("CTLiteClass::Alloc modelfile successful");
  217. #endif
  218. return true;
  219. }
  220. else
  221. {
  222. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "CTfLiteClass::ReadFileToModel: Can't allocate enough memory: " + std::to_string(size));
  223. LogFile.WriteHeapInfo("CTfLiteClass::ReadFileToModel");
  224. return false;
  225. }
  226. }
  227. bool CTfLiteClass::LoadModel(std::string _fn)
  228. {
  229. LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "CTfLiteClass::LoadModel");
  230. if (!ReadFileToModel(_fn.c_str())) {
  231. return false;
  232. }
  233. model = tflite::GetModel(modelfile);
  234. if(model == nullptr)
  235. return false;
  236. return true;
  237. }
  238. CTfLiteClass::CTfLiteClass()
  239. {
  240. this->model = nullptr;
  241. this->modelfile = NULL;
  242. this->interpreter = nullptr;
  243. this->input = nullptr;
  244. this->output = nullptr;
  245. this->kTensorArenaSize = TENSOR_ARENA_SIZE;
  246. this->tensor_arena = (uint8_t*)psram_get_shared_tensor_arena_memory();
  247. }
// Destroy the interpreter first (it references the arena), then release
// the shared PSRAM buffers holding the tensor arena and the model file.
CTfLiteClass::~CTfLiteClass()
{
    delete this->interpreter;
    psram_free_shared_tensor_arena_and_model_memory();
}
#ifdef SUPRESS_TFLITE_ERRORS
namespace tflite
{
    // Error-reporter override that discards every TFLite message so the
    // interpreter's (often noisy) diagnostics do not flood the log.
    // Always reports success (0); the format/args are intentionally unused.
    int OwnMicroErrorReporter::Report(const char* format, va_list args)
    {
        return 0;
    }
}
#endif
  263. #endif