// CTfLiteClass.cpp
  1. #include "CTfLiteClass.h"
  2. #include "ClassLogFile.h"
  3. #include "Helper.h"
  4. #include "psram.h"
  5. #include "esp_log.h"
  6. #include "../../include/defines.h"
  7. #include <sys/stat.h>
  8. // #define DEBUG_DETAIL_ON
  9. static const char *TAG = "TFLITE";
  10. float CTfLiteClass::GetOutputValue(int nr)
  11. {
  12. TfLiteTensor* output2 = this->interpreter->output(0);
  13. int numeroutput = output2->dims->data[1];
  14. if ((nr+1) > numeroutput)
  15. return -1000;
  16. return output2->data.f[nr];
  17. }
  18. int CTfLiteClass::GetClassFromImageBasis(CImageBasis *rs)
  19. {
  20. if (!LoadInputImageBasis(rs))
  21. return -1000;
  22. Invoke();
  23. return GetOutClassification();
  24. }
  25. int CTfLiteClass::GetOutClassification(int _von, int _bis)
  26. {
  27. TfLiteTensor* output2 = interpreter->output(0);
  28. float zw_max;
  29. float zw;
  30. int zw_class;
  31. if (output2 == NULL)
  32. return -1;
  33. int numeroutput = output2->dims->data[1];
  34. //ESP_LOGD(TAG, "number output neurons: %d", numeroutput);
  35. if (_bis == -1)
  36. _bis = numeroutput -1;
  37. if (_von == -1)
  38. _von = 0;
  39. if (_bis >= numeroutput)
  40. {
  41. ESP_LOGD(TAG, "NUMBER OF OUTPUT NEURONS does not match required classification!");
  42. return -1;
  43. }
  44. zw_max = output2->data.f[_von];
  45. zw_class = _von;
  46. for (int i = _von + 1; i <= _bis; ++i)
  47. {
  48. zw = output2->data.f[i];
  49. if (zw > zw_max)
  50. {
  51. zw_max = zw;
  52. zw_class = i;
  53. }
  54. }
  55. return (zw_class - _von);
  56. }
  57. void CTfLiteClass::GetInputDimension(bool silent = false)
  58. {
  59. TfLiteTensor* input2 = this->interpreter->input(0);
  60. int numdim = input2->dims->size;
  61. if (!silent) ESP_LOGD(TAG, "NumDimension: %d", numdim);
  62. int sizeofdim;
  63. for (int j = 0; j < numdim; ++j)
  64. {
  65. sizeofdim = input2->dims->data[j];
  66. if (!silent) ESP_LOGD(TAG, "SizeOfDimension %d: %d", j, sizeofdim);
  67. if (j == 1) im_height = sizeofdim;
  68. if (j == 2) im_width = sizeofdim;
  69. if (j == 3) im_channel = sizeofdim;
  70. }
  71. }
  72. int CTfLiteClass::ReadInputDimenstion(int _dim)
  73. {
  74. if (_dim == 0)
  75. return im_width;
  76. if (_dim == 1)
  77. return im_height;
  78. if (_dim == 2)
  79. return im_channel;
  80. return -1;
  81. }
  82. int CTfLiteClass::GetAnzOutPut(bool silent)
  83. {
  84. TfLiteTensor* output2 = this->interpreter->output(0);
  85. int numdim = output2->dims->size;
  86. if (!silent) ESP_LOGD(TAG, "NumDimension: %d", numdim);
  87. int sizeofdim;
  88. for (int j = 0; j < numdim; ++j)
  89. {
  90. sizeofdim = output2->dims->data[j];
  91. if (!silent) ESP_LOGD(TAG, "SizeOfDimension %d: %d", j, sizeofdim);
  92. }
  93. float fo;
  94. // Process the inference results.
  95. int numeroutput = output2->dims->data[1];
  96. for (int i = 0; i < numeroutput; ++i)
  97. {
  98. fo = output2->data.f[i];
  99. if (!silent) ESP_LOGD(TAG, "Result %d: %f", i, fo);
  100. }
  101. return numeroutput;
  102. }
  103. void CTfLiteClass::Invoke()
  104. {
  105. if (interpreter != nullptr)
  106. interpreter->Invoke();
  107. }
  108. bool CTfLiteClass::LoadInputImageBasis(CImageBasis *rs)
  109. {
  110. #ifdef DEBUG_DETAIL_ON
  111. LogFile.WriteHeapInfo("CTfLiteClass::LoadInputImageBasis - Start");
  112. #endif
  113. unsigned int w = rs->width;
  114. unsigned int h = rs->height;
  115. unsigned char red, green, blue;
  116. // ESP_LOGD(TAG, "Image: %s size: %d x %d\n", _fn.c_str(), w, h);
  117. input_i = 0;
  118. float* input_data_ptr = (interpreter->input(0))->data.f;
  119. for (int y = 0; y < h; ++y)
  120. for (int x = 0; x < w; ++x)
  121. {
  122. red = rs->GetPixelColor(x, y, 0);
  123. green = rs->GetPixelColor(x, y, 1);
  124. blue = rs->GetPixelColor(x, y, 2);
  125. *(input_data_ptr) = (float) red;
  126. input_data_ptr++;
  127. *(input_data_ptr) = (float) green;
  128. input_data_ptr++;
  129. *(input_data_ptr) = (float) blue;
  130. input_data_ptr++;
  131. }
  132. #ifdef DEBUG_DETAIL_ON
  133. LogFile.WriteHeapInfo("CTfLiteClass::LoadInputImageBasis - done");
  134. #endif
  135. return true;
  136. }
  137. bool CTfLiteClass::MakeAllocate()
  138. {
  139. static tflite::AllOpsResolver resolver;
  140. #ifdef DEBUG_DETAIL_ON
  141. LogFile.WriteHeapInfo("CTLiteClass::Alloc start");
  142. #endif
  143. LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "CTfLiteClass::MakeAllocate");
  144. this->interpreter = new tflite::MicroInterpreter(this->model, resolver, this->tensor_arena, this->kTensorArenaSize, this->error_reporter);
  145. if (this->interpreter)
  146. {
  147. TfLiteStatus allocate_status = this->interpreter->AllocateTensors();
  148. if (allocate_status != kTfLiteOk) {
  149. TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
  150. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "AllocateTensors() failed");
  151. this->GetInputDimension();
  152. return false;
  153. }
  154. }
  155. else
  156. {
  157. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "new tflite::MicroInterpreter failed");
  158. LogFile.WriteHeapInfo("CTfLiteClass::MakeAllocate-new tflite::MicroInterpreter failed");
  159. return false;
  160. }
  161. #ifdef DEBUG_DETAIL_ON
  162. LogFile.WriteHeapInfo("CTLiteClass::Alloc done");
  163. #endif
  164. return true;
  165. }
  166. void CTfLiteClass::GetInputTensorSize()
  167. {
  168. #ifdef DEBUG_DETAIL_ON
  169. float *zw = this->input;
  170. int test = sizeof(zw);
  171. ESP_LOGD(TAG, "Input Tensor Dimension: %d", test);
  172. #endif
  173. }
  174. long CTfLiteClass::GetFileSize(std::string filename)
  175. {
  176. struct stat stat_buf;
  177. long rc = stat(filename.c_str(), &stat_buf);
  178. return rc == 0 ? stat_buf.st_size : -1;
  179. }
  180. bool CTfLiteClass::ReadFileToModel(std::string _fn)
  181. {
  182. LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "CTfLiteClass::ReadFileToModel: " + _fn);
  183. long size = GetFileSize(_fn);
  184. if (size == -1)
  185. {
  186. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Model file doesn't exist: " + _fn + "!");
  187. return false;
  188. }
  189. else if(size > MAX_MODEL_SIZE) {
  190. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Unable to load model '" + _fn + "'! It does not fit in the reserved shared memory in PSRAM!");
  191. return false;
  192. }
  193. LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Loading Model " + _fn + " /size: " + std::to_string(size) + " bytes...");
  194. #ifdef DEBUG_DETAIL_ON
  195. LogFile.WriteHeapInfo("CTLiteClass::Alloc modelfile start");
  196. #endif
  197. <<<<<<< HEAD
  198. modelfile = (unsigned char*)psram_get_shared_model_memory();
  199. =======
  200. modelfile = (unsigned char*)malloc_psram_heap(std::string(TAG) + "->modelfile", size, MALLOC_CAP_SPIRAM);
  201. >>>>>>> master
  202. if(modelfile != NULL)
  203. {
  204. FILE* f = fopen(_fn.c_str(), "rb"); // previously only "r
  205. fread(modelfile, 1, size, f);
  206. fclose(f);
  207. #ifdef DEBUG_DETAIL_ON
  208. LogFile.WriteHeapInfo("CTLiteClass::Alloc modelfile successful");
  209. #endif
  210. return true;
  211. }
  212. else
  213. {
  214. LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "CTfLiteClass::ReadFileToModel: Can't allocate enough memory: " + std::to_string(size));
  215. LogFile.WriteHeapInfo("CTfLiteClass::ReadFileToModel");
  216. return false;
  217. }
  218. }
  219. bool CTfLiteClass::LoadModel(std::string _fn)
  220. {
  221. #ifdef SUPRESS_TFLITE_ERRORS
  222. this->error_reporter = new tflite::OwnMicroErrorReporter;
  223. #else
  224. this->error_reporter = new tflite::MicroErrorReporter;
  225. #endif
  226. LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "CTfLiteClass::LoadModel");
  227. if (!ReadFileToModel(_fn.c_str())) {
  228. return false;
  229. }
  230. model = tflite::GetModel(modelfile);
  231. if(model == nullptr)
  232. return false;
  233. return true;
  234. }
  235. CTfLiteClass::CTfLiteClass()
  236. {
  237. this->model = nullptr;
  238. this->modelfile = NULL;
  239. this->interpreter = nullptr;
  240. this->input = nullptr;
  241. <<<<<<< HEAD
  242. this->output = nullptr;
  243. this->kTensorArenaSize = TENSOR_ARENA_SIZE;
  244. this->tensor_arena = (uint8_t*)psram_get_shared_tensor_arena_memory();
  245. =======
  246. this->output = nullptr;
  247. this->kTensorArenaSize = 800 * 1024; /// according to testfile: 108000 - so far 600;; 2021-09-11: 200 * 1024
  248. this->tensor_arena = (uint8_t*)malloc_psram_heap(std::string(TAG) + "->tensor_arena", kTensorArenaSize, MALLOC_CAP_SPIRAM);
  249. >>>>>>> master
  250. }
  251. CTfLiteClass::~CTfLiteClass()
  252. {
  253. delete this->interpreter;
  254. delete this->error_reporter;
  255. <<<<<<< HEAD
  256. psram_free_shared_tensor_arena_and_model_memory();
  257. =======
  258. free_psram_heap(std::string(TAG) + "->modelfile", modelfile);
  259. free_psram_heap(std::string(TAG) + "->tensor_arena", this->tensor_arena);
  260. >>>>>>> master
  261. }
  262. namespace tflite
  263. {
  264. int OwnMicroErrorReporter::Report(const char* format, va_list args)
  265. {
  266. return 0;
  267. }
  268. }