// CTfLiteClass.cpp
  1. #include "CTfLiteClass.h"
  2. #include "ClassLogFile.h"
  3. #include "Helper.h"
  4. #include <sys/stat.h>
  5. // #define DEBUG_DETAIL_ON
  6. float CTfLiteClass::GetOutputValue(int nr)
  7. {
  8. TfLiteTensor* output2 = this->interpreter->output(0);
  9. int numeroutput = output2->dims->data[1];
  10. if ((nr+1) > numeroutput)
  11. return -1000;
  12. return output2->data.f[nr];
  13. }
  14. int CTfLiteClass::GetClassFromImageBasis(CImageBasis *rs)
  15. {
  16. if (!LoadInputImageBasis(rs))
  17. return -1000;
  18. Invoke();
  19. return GetOutClassification();
  20. }
  21. int CTfLiteClass::GetOutClassification(int _von, int _bis)
  22. {
  23. TfLiteTensor* output2 = interpreter->output(0);
  24. float zw_max;
  25. float zw;
  26. int zw_class;
  27. if (output2 == NULL)
  28. return -1;
  29. int numeroutput = output2->dims->data[1];
  30. //printf("\n number output neurons: %d\n\n", numeroutput);
  31. if (_bis == -1)
  32. _bis = numeroutput -1;
  33. if (_von == -1)
  34. _von = 0;
  35. if (_bis >= numeroutput)
  36. {
  37. printf("ANZAHL OUTPUT NEURONS passt nicht zu geforderter Classifizierung!");
  38. return -1;
  39. }
  40. zw_max = output2->data.f[_von];
  41. zw_class = _von;
  42. for (int i = _von + 1; i <= _bis; ++i)
  43. {
  44. zw = output2->data.f[i];
  45. if (zw > zw_max)
  46. {
  47. zw_max = zw;
  48. zw_class = i;
  49. }
  50. }
  51. return (zw_class - _von);
  52. }
  53. void CTfLiteClass::GetInputDimension(bool silent = false)
  54. {
  55. TfLiteTensor* input2 = this->interpreter->input(0);
  56. int numdim = input2->dims->size;
  57. if (!silent) printf("NumDimension: %d\n", numdim);
  58. int sizeofdim;
  59. for (int j = 0; j < numdim; ++j)
  60. {
  61. sizeofdim = input2->dims->data[j];
  62. if (!silent) printf("SizeOfDimension %d: %d\n", j, sizeofdim);
  63. if (j == 1) im_height = sizeofdim;
  64. if (j == 2) im_width = sizeofdim;
  65. if (j == 3) im_channel = sizeofdim;
  66. }
  67. }
  68. int CTfLiteClass::ReadInputDimenstion(int _dim)
  69. {
  70. if (_dim == 0)
  71. return im_width;
  72. if (_dim == 1)
  73. return im_height;
  74. if (_dim == 2)
  75. return im_channel;
  76. return -1;
  77. }
  78. int CTfLiteClass::GetAnzOutPut(bool silent)
  79. {
  80. TfLiteTensor* output2 = this->interpreter->output(0);
  81. int numdim = output2->dims->size;
  82. if (!silent) printf("NumDimension: %d\n", numdim);
  83. int sizeofdim;
  84. for (int j = 0; j < numdim; ++j)
  85. {
  86. sizeofdim = output2->dims->data[j];
  87. if (!silent) printf("SizeOfDimension %d: %d\n", j, sizeofdim);
  88. }
  89. float fo;
  90. // Process the inference results.
  91. int numeroutput = output2->dims->data[1];
  92. for (int i = 0; i < numeroutput; ++i)
  93. {
  94. fo = output2->data.f[i];
  95. if (!silent) printf("Result %d: %f\n", i, fo);
  96. }
  97. return numeroutput;
  98. }
  99. void CTfLiteClass::Invoke()
  100. {
  101. if (interpreter != nullptr)
  102. interpreter->Invoke();
  103. }
  104. bool CTfLiteClass::LoadInputImageBasis(CImageBasis *rs)
  105. {
  106. std::string zw = "ClassFlowCNNGeneral::doNeuralNetwork after LoadInputResizeImage: ";
  107. unsigned int w = rs->width;
  108. unsigned int h = rs->height;
  109. unsigned char red, green, blue;
  110. // printf("Image: %s size: %d x %d\n", _fn.c_str(), w, h);
  111. input_i = 0;
  112. float* input_data_ptr = (interpreter->input(0))->data.f;
  113. for (int y = 0; y < h; ++y)
  114. for (int x = 0; x < w; ++x)
  115. {
  116. red = rs->GetPixelColor(x, y, 0);
  117. green = rs->GetPixelColor(x, y, 1);
  118. blue = rs->GetPixelColor(x, y, 2);
  119. *(input_data_ptr) = (float) red;
  120. input_data_ptr++;
  121. *(input_data_ptr) = (float) green;
  122. input_data_ptr++;
  123. *(input_data_ptr) = (float) blue;
  124. input_data_ptr++;
  125. }
  126. #ifdef DEBUG_DETAIL_ON
  127. LogFile.WriteToFile("Nach dem Laden in input");
  128. #endif
  129. return true;
  130. }
  131. void CTfLiteClass::MakeAllocate()
  132. {
  133. static tflite::AllOpsResolver resolver;
  134. // printf(LogFile.getESPHeapInfo().c_str()); printf("\n");
  135. this->interpreter = new tflite::MicroInterpreter(this->model, resolver, this->tensor_arena, this->kTensorArenaSize, this->error_reporter);
  136. // printf(LogFile.getESPHeapInfo().c_str()); printf("\n");
  137. TfLiteStatus allocate_status = this->interpreter->AllocateTensors();
  138. if (allocate_status != kTfLiteOk) {
  139. TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
  140. LogFile.WriteToFile("AllocateTensors() failed");
  141. this->GetInputDimension();
  142. return;
  143. }
  144. // printf("Allocate Done.\n");
  145. }
  146. void CTfLiteClass::GetInputTensorSize(){
  147. #ifdef DEBUG_DETAIL_ON
  148. float *zw = this->input;
  149. int test = sizeof(zw);
  150. printf("Input Tensor Dimension: %d\n", test);
  151. #endif
  152. }
  153. long CTfLiteClass::GetFileSize(std::string filename)
  154. {
  155. struct stat stat_buf;
  156. long rc = stat(filename.c_str(), &stat_buf);
  157. return rc == 0 ? stat_buf.st_size : -1;
  158. }
  159. unsigned char* CTfLiteClass::ReadFileToCharArray(std::string _fn)
  160. {
  161. long size;
  162. size = GetFileSize(_fn);
  163. if (size == -1)
  164. {
  165. printf("\nFile doesn't exist.\n");
  166. return NULL;
  167. }
  168. unsigned char *result = (unsigned char*) malloc(size);
  169. int anz = 1;
  170. while (!result && (anz < 6)) // maximal 5x versuchen (= 5s)
  171. {
  172. #ifdef DEBUG_DETAIL_ON
  173. printf("Speicher ist voll - Versuche es erneut: %d.\n", anz);
  174. #endif
  175. result = (unsigned char*) malloc(size);
  176. anz++;
  177. }
  178. if(result != NULL) {
  179. FILE* f = OpenFileAndWait(_fn.c_str(), "rb"); // vorher nur "r"
  180. fread(result, 1, size, f);
  181. fclose(f);
  182. }else {
  183. printf("\nNo free memory available.\n");
  184. }
  185. return result;
  186. }
  187. bool CTfLiteClass::LoadModel(std::string _fn){
  188. #ifdef SUPRESS_TFLITE_ERRORS
  189. this->error_reporter = new tflite::OwnMicroErrorReporter;
  190. #else
  191. this->error_reporter = new tflite::MicroErrorReporter;
  192. #endif
  193. modelload = ReadFileToCharArray(_fn.c_str());
  194. if (modelload == NULL)
  195. return false;
  196. model = tflite::GetModel(modelload);
  197. // free(rd);
  198. TFLITE_MINIMAL_CHECK(model != nullptr);
  199. return true;
  200. }
  201. CTfLiteClass::CTfLiteClass()
  202. {
  203. this->model = nullptr;
  204. this->interpreter = nullptr;
  205. this->input = nullptr;
  206. this->output = nullptr;
  207. this->kTensorArenaSize = 800 * 1024; /// laut testfile: 108000 - bisher 600;; 2021-09-11: 200 * 1024
  208. this->tensor_arena = new uint8_t[kTensorArenaSize];
  209. }
  210. CTfLiteClass::~CTfLiteClass()
  211. {
  212. delete this->tensor_arena;
  213. delete this->interpreter;
  214. delete this->error_reporter;
  215. if (modelload)
  216. free(modelload);
  217. }
  218. namespace tflite {
  219. int OwnMicroErrorReporter::Report(const char* format, va_list args) {
  220. return 0;
  221. }
  222. }