Skip to content
Draft
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Adapt object_detection_demo for batch processing
  • Loading branch information
sivanov-work committed Dec 14, 2023
commit 700c86349c9de437e68de0afdd6636265c572dd8
3 changes: 3 additions & 0 deletions demos/common/cpp/models/include/models/detection_model_ssd.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,9 @@ class ModelSSD : public DetectionModel {
const std::string& layout = "");

std::shared_ptr<InternalModelData> preprocess(const InputData& inputData, ov::InferRequest& request) override;
std::shared_ptr<InternalModelData> preprocess(std::vector<std::shared_ptr<InputData>>::iterator inputDataBegin,
std::vector<std::shared_ptr<InputData>>::iterator inputDataEnd,
ov::InferRequest& request) override;
std::unique_ptr<ResultBase> postprocess(InferenceResult& infResult) override;

protected:
Expand Down
11 changes: 0 additions & 11 deletions demos/common/cpp/models/src/classification_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -44,27 +44,16 @@ ClassificationModel::ClassificationModel(const std::string& modelFileName,

std::unique_ptr<ResultBase> ClassificationModel::postprocess(InferenceResult& infResult) {
const ov::Tensor& indicesTensor = infResult.outputsData.find(outputsNames[0])->second;
const void* indicesTensorBuffer = reinterpret_cast<const void*>(indicesTensor.data());
std::cout << "-S- indices tensor data: " << indicesTensorBuffer << ", size: " << indicesTensor.get_size() << std::endl;
const int* indicesPtr = indicesTensor.data<int>();
for (int i = 0; i < indicesTensor.get_size(); i++){
std::cout << "-S- index[" << i << "]: " << indicesPtr[i] <<std::endl;
}
const ov::Tensor& scoresTensor = infResult.outputsData.find(outputsNames[1])->second;
const float* scoresPtr = scoresTensor.data<float>();
const void* scoresTensorBuffer = reinterpret_cast<const void*>(scoresTensor.data());
std::cout << "-S- scores tensor data: " << scoresTensorBuffer << ", size: " << scoresTensor.get_size() <<std::endl;
for (int i = 0; i < scoresTensor.get_size(); i++){
std::cout << "-S- score[" << i << "]: " << scoresPtr[i] <<std::endl;
}

ClassificationResult* result = new ClassificationResult(infResult.frameId, infResult.metaData);
auto retVal = std::unique_ptr<ResultBase>(result);

result->topLabels.reserve(scoresTensor.get_size());
for (size_t i = 0; i < scoresTensor.get_size(); ++i) {
int ind = indicesPtr[i];
std::cout << "-S- index???[" << i << "]: " << ind << ", labels size: " << labels.size() <<std::endl;
if (ind < 0 || ind >= static_cast<int>(labels.size())) {
throw std::runtime_error(std::string("Invalid index: ") + std::to_string(ind) + " for the class label is found during postprocessing, label size: " + std::to_string(labels.size()));
}
Expand Down
15 changes: 15 additions & 0 deletions demos/common/cpp/models/src/detection_model_ssd.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,21 @@ std::shared_ptr<InternalModelData> ModelSSD::preprocess(const InputData& inputDa
return DetectionModel::preprocess(inputData, request);
}

// Batch-range preprocessing entry point for SSD-style detectors.
// Mirrors the single-image overload: when the network has a second input
// (the "image info" blob), it is populated with the network input height,
// width and a scale factor of 1 before the base class copies the images in.
// NOTE(review): a single [H, W, scale] triple is written regardless of batch
// size — confirm the info input is not batched for batch > 1.
std::shared_ptr<InternalModelData> ModelSSD::preprocess(std::vector<std::shared_ptr<InputData>>::iterator inputDataBegin,
    std::vector<std::shared_ptr<InputData>>::iterator inputDataEnd,
    ov::InferRequest& request) {
    if (inputsNames.size() > 1) {
        const auto& infoTensor = request.get_tensor(inputsNames[1]);
        auto* const infoData = infoTensor.data<float>();
        infoData[0] = static_cast<float>(netInputHeight);
        infoData[1] = static_cast<float>(netInputWidth);
        infoData[2] = 1;
        request.set_tensor(inputsNames[1], infoTensor);
    }

    return DetectionModel::preprocess(inputDataBegin, inputDataEnd, request);
}

// Routes the finished inference to the parser matching the network's output
// layout: several output blobs -> postprocessMultipleOutputs(), otherwise the
// single-blob path.
std::unique_ptr<ResultBase> ModelSSD::postprocess(InferenceResult& infResult) {
    if (outputsNames.size() > 1) {
        return postprocessMultipleOutputs(infResult);
    }
    return postprocessSingleOutput(infResult);
}
Expand Down
7 changes: 4 additions & 3 deletions demos/common/cpp/models/src/image_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,12 +43,15 @@ std::shared_ptr<InternalModelData> ImageModel::preprocess(std::vector<std::share
const size_t width = tensorShape[ov::layout::width_idx(layout)];
const size_t height = tensorShape[ov::layout::height_idx(layout)];
const size_t channels = tensorShape[ov::layout::channels_idx(layout)];
std::cout << "ImageModel::preprocess: batch: " << batch << ", width: " << width << ", height: " << height << ", channels: " << channels << std::endl;
char* memoryBlob = nullptr;
size_t image_index = 0;
bool isMatFloat = false;
int origImg_cols = 0;
int origImg_rows = 0;
size_t image_count = std::distance(inputDataBegin, inputDataEnd);
if (image_count != batch) {
throw std::runtime_error("Image count in preprocess must repeat batch count");
}
for (auto inputDataIt = inputDataBegin; inputDataIt != inputDataEnd; ++inputDataIt ) {
const auto& origImg = (*inputDataIt)->asRef<ImageInputData>().inputImage;
origImg_cols = origImg.cols;
Expand All @@ -75,7 +78,6 @@ std::shared_ptr<InternalModelData> ImageModel::preprocess(std::vector<std::share
img = resizeImageExt(img, width, height, resizeMode, interpolationMode);
}
size_t sizeInBytes = img.total() * img.elemSize();
std::cout << "image size in bytes: " << sizeInBytes << std::endl;
if (!memoryBlob) {
memoryBlob = new char[sizeInBytes * batch]; // intended memory leak
}
Expand All @@ -85,7 +87,6 @@ std::shared_ptr<InternalModelData> ImageModel::preprocess(std::vector<std::share
image_index++;
}

std::cout << "isMatFloat: " << isMatFloat << std::endl;
auto precision = isMatFloat ? ov::element::f32 : ov::element::u8;
auto batched_tensor = ov::Tensor(precision, ov::Shape{ batch, height, width, channels }, memoryBlob);
request.set_tensor(inputsNames[0], batched_tensor);
Expand Down
25 changes: 25 additions & 0 deletions demos/common/cpp/pipelines/include/pipelines/metadata.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,31 @@ struct ImageMetaData : public MetaData {
ImageMetaData(cv::Mat img, std::chrono::steady_clock::time_point timeStamp) : img(img), timeStamp(timeStamp) {}
};

// Metadata accompanying a batch of frames submitted to the pipeline as a
// single infer request: one ImageMetaData per image plus a batch timestamp.
struct ImageBatchMetaData : public MetaData {
    // Timestamp of the batch; updated to the latest add()'ed image's timestamp.
    std::chrono::steady_clock::time_point timeStamp;
    // Per-image metadata, kept in submission order.
    std::vector<std::shared_ptr<ImageMetaData>> metadatas;

    ImageBatchMetaData() = default;

    // Builds per-image metadata for every image in [imagesBeginIt, imagesEndIt),
    // stamping each entry with the same batch timestamp.
    ImageBatchMetaData(std::vector<cv::Mat>::iterator imagesBeginIt,
        const std::vector<cv::Mat>::iterator imagesEndIt,
        std::chrono::steady_clock::time_point timeStamp) : timeStamp(timeStamp) {
        metadatas.reserve(static_cast<size_t>(std::distance(imagesBeginIt, imagesEndIt)));
        // Iterate over a copy of the begin iterator instead of mutating the
        // parameter inside a degenerate for(;cond;) loop.
        for (auto it = imagesBeginIt; it != imagesEndIt; ++it) {
            metadatas.push_back(std::make_shared<ImageMetaData>(*it, timeStamp));
        }
    }

    // Appends one image to the batch and advances the batch timestamp.
    void add(cv::Mat img, std::chrono::steady_clock::time_point timeStamp) {
        metadatas.push_back(std::make_shared<ImageMetaData>(img, timeStamp));
        this->timeStamp = timeStamp;
    }

    // Drops all accumulated per-image metadata; timeStamp is left unchanged.
    void clear() {
        metadatas.clear();
    }
};

struct ClassificationImageMetaData : public ImageMetaData {
unsigned int groundTruthId;

Expand Down
4 changes: 0 additions & 4 deletions demos/common/cpp/pipelines/src/async_pipeline.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,6 @@ int64_t AsyncPipeline::submitData(std::vector<std::shared_ptr<InputData>>::itera
{
const std::lock_guard<std::mutex> lock(mtx);
inferenceMetrics.update(startTime);
std::cout << "callback has been called" << std::endl;
try {
if (ex) {
std::rethrow_exception(ex);
Expand All @@ -108,14 +107,11 @@ int64_t AsyncPipeline::submitData(std::vector<std::shared_ptr<InputData>>::itera

for (const auto& outName : model->getOutputsNames()) {
auto tensor = request.get_tensor(outName);
std::cout << "-S- output tensorName: " << outName << ", tensor ptr: " << reinterpret_cast<void*>(tensor.data()) << ", size: " << tensor.get_size() << std::endl;
result.outputsData.emplace(outName, tensor);
}

completedInferenceResults.emplace(frameID, result);
std::cout << "before setRequestIdle: " << std::endl;
requestsPool->setRequestIdle(request);
std::cout << "after setRequestIdle: " << std::endl;
} catch (...) {
if (!callbackException) {
callbackException = std::current_exception();
Expand Down
Loading