int main()
{
    /* read input image data */
-   cv::Mat image = cv::imread(RESOURCE_DIR"test.jpg", 1);
+   cv::Mat image = cv::imread(RESOURCE_DIR"test.jpg", 1), prepimage;
    cv::imshow("InputImage", image);

    // Initialize Colormap.
@@ -32,9 +32,10 @@ int main()
    // Initialize Threads.
    auto num_of_threads = 4;

-   cv::resize(image, image, cv::Size(480, 360));
-   cv::imshow("InputImage for CNN", image);
-   image.convertTo(image, CV_32FC1, 1.0 / 255);
+   cv::resize(image, prepimage, cv::Size(480, 360));
+   prepimage.convertTo(prepimage, CV_32FC1, 1.0 / 255);
+   cv::imshow("InputImage for CNN", prepimage);
+   cv::waitKey(0);

    // TFLite
    std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(MODEL_FILENAME);
@@ -74,7 +75,8 @@ int main()
    // Set data to input tensor
    printf("=== MEM copy ===\n");
    float* input = interpreter->typed_input_tensor<float>(0);
-   memcpy(input, image.data, sizeof(float) * input_array_size);
+   // memcpy(input, prepimage.reshape(0, 1).data, sizeof(float) * input_array_size);
+   memcpy(input, prepimage.data, sizeof(float) * input_array_size);

    // Run inference
    printf("=== Pre-invoke ===\n");
@@ -88,8 +90,15 @@ int main()
    std::vector<float> output_data;
    const auto& output_indices = interpreter->outputs();
    const int num_outputs = output_indices.size();
+   std::cout << "num_outputs: " << num_outputs << std::endl;
+   // Inputs: 1040
+   // num_outputs: 2
+   // output_indices[0]: 716 ENet/fullconv/BiasAdd
+   // output_indices[1]: 726 ENet/logits_to_softmax
+   std::cout << "output_indices[0]: " << output_indices[0] << std::endl;
+   std::cout << "output_indices[1]: " << output_indices[1] << std::endl;
    int out_idx = 0;
-   for (int i = 0; i < num_outputs; ++i)
+   /* for (int i = 0; i < num_outputs; ++i)
    {
        const auto* out_tensor = interpreter->tensor(output_indices[i]);
        assert(out_tensor != nullptr);
@@ -100,16 +109,37 @@ int main()
        {
            output_data[out_idx++] = output[j];
        }
+   }*/
+   const auto* out_tensor = interpreter->tensor(output_indices[1]);
+   assert(out_tensor != nullptr);
+   const int num_values = out_tensor->bytes / sizeof(float);
+   output_data.resize(out_idx + num_values);
+   const float* output = interpreter->typed_output_tensor<float>(1);
+   for (int i = 0; i < num_values; ++i)
+   {
+       output_data[out_idx++] = output[i];
    }

+   // std::ostringstream output1_string_stream;
+   // std::copy(output_data.begin(), output_data.end(), std::ostream_iterator<int>(output1_string_stream, " "));
+   // std::cout << "output1 value: " << output1_string_stream.str() << std::endl;
+
+   std::ostringstream output2_string_stream;
+   std::copy(output_data.begin(), output_data.end(), std::ostream_iterator<int>(output2_string_stream, " "));
+   std::cout << "output2 value: " << output2_string_stream.str() << std::endl;
+
    // Create segmentation map.
-   cv::Mat seg_im(cv::Size(input_tensor_shape[1], input_tensor_shape[2]), CV_8UC3);
+   printf("=== Create segmentation map start ===\n");
+   cv::Mat seg_im(cv::Size(input_tensor_shape[1], input_tensor_shape[2]), CV_32FC1);
+   printf("=== Create segmentation map 1 ===\n");
    LabelToColorMap(output_data, *color_map.get(), seg_im);
+   printf("=== Create segmentation map end ===\n");

    // output tensor size => camera resolution
    cv::resize(seg_im, seg_im, cv::Size(480, 360));
-   seg_im = (image / 2) + (seg_im / 2);
+   // seg_im = (image / 2) + (seg_im / 2);

+   cv::imshow("Segmentation Image", seg_im);
    cv::waitKey(0);
    return 0;
}
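The raw memcpy into the input tensor above works only if prepimage is stored contiguously and its float element count matches input_array_size. Below is a minimal sketch of a checked copy; the helper name CopyMatToTensorSketch is hypothetical, while prepimage and input_array_size are the names used in the commit.

// Sketch only, not part of the commit: copy a CV_32F cv::Mat into a TFLite
// input buffer after verifying layout and size (assumptions noted above).
#include <opencv2/opencv.hpp>
#include <cassert>
#include <cstring>

void CopyMatToTensorSketch(const cv::Mat& prepimage, float* input, size_t input_array_size)
{
    // Ensure the pixel data occupies one contiguous block of memory.
    cv::Mat src = prepimage.isContinuous() ? prepimage : prepimage.clone();
    assert(src.depth() == CV_32F);
    // Element count of the Mat must match the tensor's element count.
    assert(src.total() * src.channels() == input_array_size);
    std::memcpy(input, src.ptr<float>(0), sizeof(float) * input_array_size);
}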
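The commit reads the softmax result through the hard-coded output index 1 (ENet/logits_to_softmax, per the logged indices). As a sketch, the same tensor could be located by name through the standard tflite::Interpreter accessors; FindOutputIndexByName is a hypothetical helper, not something the commit defines.

// Sketch only: resolve an output index by tensor name instead of hard-coding 1.
#include <string>
#include "tensorflow/lite/interpreter.h"

int FindOutputIndexByName(const tflite::Interpreter& interpreter, const std::string& name)
{
    const auto& outputs = interpreter.outputs();
    for (int i = 0; i < static_cast<int>(outputs.size()); ++i) {
        const char* out_name = interpreter.GetOutputName(i);
        if (out_name != nullptr && name == out_name) {
            return i;  // usable with interpreter.typed_output_tensor<float>(i)
        }
    }
    return -1;  // not found
}

For example, FindOutputIndexByName(*interpreter, "ENet/logits_to_softmax") would return the index to pass to typed_output_tensor, instead of assuming the softmax tensor is always second.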
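LabelToColorMap itself is defined elsewhere in the repository, and its color map type is not visible in this commit. Purely as an illustration of what such a step typically does with a flattened [1, height, width, num_classes] softmax tensor (an assumed layout), the sketch below takes the per-pixel argmax over classes and looks the label up in a per-class BGR table; unlike the commit's CV_32FC1 seg_im, it writes a CV_8UC3 image.

// Hypothetical sketch only -- not the repository's LabelToColorMap.
#include <opencv2/opencv.hpp>
#include <vector>

void LabelToColorMapSketch(const std::vector<float>& softmax,      // [1, H, W, C] flattened (assumed)
                           const std::vector<cv::Vec3b>& colors,   // one BGR color per class (assumed)
                           int height, int width, int num_classes,
                           cv::Mat& seg_im)
{
    seg_im.create(height, width, CV_8UC3);
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            const int base = (y * width + x) * num_classes;  // first class score for this pixel
            int best = 0;
            for (int c = 1; c < num_classes; ++c) {
                if (softmax[base + c] > softmax[base + best]) best = c;
            }
            seg_im.at<cv::Vec3b>(y, x) = colors[best];
        }
    }
}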