Merged
Changes from 12 commits
4 changes: 0 additions & 4 deletions cmake/external/onnxruntime_external_deps.cmake
@@ -598,10 +598,6 @@ if(NOT (onnx_FOUND OR ONNX_FOUND)) # building ONNX from source
endif()
endif()

if (onnxruntime_RUN_ONNX_TESTS)
add_definitions(-DORT_RUN_EXTERNAL_ONNX_TESTS)
endif()

if(onnxruntime_ENABLE_DLPACK)
message(STATUS "dlpack is enabled.")

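The deleted CMake block above is where the onnxruntime_RUN_ONNX_TESTS build option was turned into the ORT_RUN_EXTERNAL_ONNX_TESTS compile definition that the test sources below key off. As a minimal sketch of that wiring (the option and macro names come from the deleted lines; the test name and body are hypothetical):

// Only compiled when CMake added -DORT_RUN_EXTERNAL_ONNX_TESTS,
// i.e. when onnxruntime_RUN_ONNX_TESTS was enabled and the add_definitions call removed above ran.
#ifdef ORT_RUN_EXTERNAL_ONNX_TESTS
TEST(ExternalOnnxTests, HypotheticalGatedTest) {
  // Would exercise models downloaded into ../models, like the tests removed below.
}
#endif
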
87 changes: 0 additions & 87 deletions onnxruntime/test/framework/inference_session_test.cc
@@ -588,93 +588,6 @@ TEST(InferenceSessionTests, RequestLoadCancellation) {
}
}

#ifdef ORT_RUN_EXTERNAL_ONNX_TESTS
static bool Compare(const InputDefList& f_arg, const InputDefList& s_arg) {
if (f_arg.size() != s_arg.size()) {
std::cout << "Sizes differ: f_arg size: " << f_arg.size() << " s_arg size: " << s_arg.size() << std::endl;
return false;
}

for (size_t i = 0; i < f_arg.size(); ++i) {
const onnxruntime::NodeArg* x = f_arg[i];
const onnxruntime::NodeArg* y = s_arg[i];
if ((x->Shape() == nullptr) ^ (y->Shape() == nullptr)) {
return false;
}
if (!x->Shape()) {
continue;
}
auto x_shape = utils::GetTensorShapeFromTensorShapeProto(*x->Shape());
auto y_shape = utils::GetTensorShapeFromTensorShapeProto(*y->Shape());
if (x->Name() == y->Name() && x_shape == y_shape && *x->Type() == *y->Type()) {
continue;
}
return false;
}

return true;
}

TEST(InferenceSessionTests, ModelMetadata) {
SessionOptions so;

so.session_logid = "InferenceSessionTests.ModelMetadata";
InferenceSession session_object{so, GetEnvironment()};
auto model_uri = ORT_TSTR("../models/opset8/test_squeezenet/model.onnx");
ASSERT_STATUS_OK(session_object.Load(model_uri));

std::shared_ptr<onnxruntime::Model> p_model;
ASSERT_STATUS_OK(onnxruntime::Model::Load(model_uri, p_model, nullptr, DefaultLoggingManager().DefaultLogger()));
const onnxruntime::Graph& graph = p_model->MainGraph();

// 1. first test the model meta
{
auto retval = session_object.GetModelMetadata();
ASSERT_TRUE(retval.first.IsOK());
const ModelMetadata* m = retval.second;
ASSERT_TRUE(m->custom_metadata_map == p_model->MetaData() &&
m->description == p_model->DocString() &&
m->domain == p_model->Domain() &&
m->graph_name == graph.Name() &&
m->producer_name == p_model->ProducerName() &&
m->version == p_model->ModelVersion());
}

{
// 2. test inputs
auto& inputs = graph.GetInputs();
auto weights = graph.GetAllInitializedTensors();

// skip the weights
InputDefList inputs_no_weights;
for (auto& elem : inputs) {
if (weights.find(elem->Name()) != weights.end()) {
continue;
} else {
inputs_no_weights.push_back(elem);
}
}

auto retval = session_object.GetModelInputs();
std::cout << "weights size: " << weights.size()
<< " inputs.size(): " << inputs.size()
<< " from session: " << retval.second->size() << std::endl;
ASSERT_TRUE(retval.first.IsOK());
ASSERT_TRUE(Compare(inputs_no_weights, *retval.second));
}

// 3. test outputs
{
auto retval = session_object.GetModelOutputs();
ASSERT_TRUE(retval.first.IsOK());

auto& outputs = graph.GetOutputs();
retval = session_object.GetModelOutputs();
ASSERT_TRUE(retval.first.IsOK());
ASSERT_TRUE(Compare(outputs, *retval.second));
}
}
#endif
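For reference, a condensed sketch of the accessor pattern the removed ModelMetadata test exercised (the model path and test name are hypothetical; the InferenceSession calls are the ones from the deleted code above):

TEST(InferenceSessionTests, HypotheticalMetadataSketch) {
  SessionOptions so;
  InferenceSession session{so, GetEnvironment()};
  ASSERT_STATUS_OK(session.Load(ORT_TSTR("path/to/model.onnx")));  // hypothetical path

  // Each accessor returns a std::pair of a Status and a pointer, as in the removed test.
  auto metadata = session.GetModelMetadata();
  ASSERT_TRUE(metadata.first.IsOK());
  auto inputs = session.GetModelInputs();    // initializers/weights are not included
  auto outputs = session.GetModelOutputs();
  ASSERT_TRUE(inputs.first.IsOK());
  ASSERT_TRUE(outputs.first.IsOK());
}
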
TEST(InferenceSessionTests, CheckRunLogger) {
if constexpr (!SessionOptions::DEFAULT_USE_PER_SESSION_THREADS) {
GTEST_SKIP() << "Skipping the test";
108 changes: 0 additions & 108 deletions onnxruntime/test/ir/onnx_model_test.cc
@@ -26,44 +26,6 @@ class ONNXModelsTest : public ::testing::Test {

std::unique_ptr<logging::Logger> logger_;
};
#ifdef ORT_RUN_EXTERNAL_ONNX_TESTS
// Tests that Resolve() properly clears the state of topologically sorted nodes,
// inputs, outputs and valueInfo.
// Assumes the graph passed in has been previously resolved.
static void TestResolve(onnxruntime::Graph& graph) {
GraphViewer graph_viewer(graph);
auto& nodes_before = graph_viewer.GetNodesInTopologicalOrder();
auto& inputs_before = graph.GetInputs();
auto& outputs_before = graph.GetOutputs();
auto& value_info_before = graph.GetValueInfo();

// Touch the graph to force Resolve() to recompute.
graph.SetGraphResolveNeeded();
graph.SetGraphProtoSyncNeeded();
ASSERT_STATUS_OK(graph.Resolve());

GraphViewer graph_viewer_2(graph);
auto& nodes_after = graph_viewer_2.GetNodesInTopologicalOrder();
auto& inputs_after = graph.GetInputs();
auto& outputs_after = graph.GetOutputs();
auto& value_info_after = graph.GetValueInfo();

// Multiple calls to Resolve() should not alter the sorted nodes,
// inputs, outputs and valueInfo. The internal state should be
// cleared.
EXPECT_EQ(nodes_before, nodes_after);
EXPECT_EQ(inputs_before, inputs_after);
EXPECT_EQ(outputs_before, outputs_after);
EXPECT_EQ(value_info_before, value_info_after);
}

TEST_F(ONNXModelsTest, squeeze_net) {
// NOTE: this requires the current directory to be where onnxruntime_ir_UT.exe is located
std::shared_ptr<Model> model;
ASSERT_STATUS_OK(Model::Load(ORT_TSTR("../models/opset8/test_squeezenet/model.onnx"), model, nullptr, *logger_));
TestResolve(model->MainGraph());
}
#endif

TEST_F(ONNXModelsTest, non_existing_model) {
// NOTE: this requires the current directory to be where onnxruntime_ir_UT.exe is located
@@ -96,76 +58,6 @@ class ONNXModelsTest1 : public ::testing::TestWithParam<const ORTCHAR_T*> {
return oss.str();
}
};
#ifdef ORT_RUN_EXTERNAL_ONNX_TESTS
TEST_F(ONNXModelsTest, bvlc_alexnet_1) {
using ::google::protobuf::io::CodedInputStream;
using ::google::protobuf::io::FileInputStream;
using ::google::protobuf::io::ZeroCopyInputStream;
int fd;
ASSERT_STATUS_OK(Env::Default().FileOpenRd(ORT_TSTR("../models/opset8/test_bvlc_alexnet/model.onnx"), fd));
ASSERT_TRUE(fd > 0);
std::unique_ptr<ZeroCopyInputStream> raw_input(new FileInputStream(fd));
std::unique_ptr<CodedInputStream> coded_input(new CodedInputStream(raw_input.get()));
// Allows protobuf library versions < 3.2.0 to parse messages greater than 64MB.
coded_input->SetTotalBytesLimit(INT_MAX);
ModelProto model_proto;
bool result = model_proto.ParseFromCodedStream(coded_input.get());
coded_input.reset();
raw_input.reset();
EXPECT_TRUE(result);
ASSERT_STATUS_OK(Env::Default().FileClose(fd));

std::shared_ptr<Model> model;
ASSERT_STATUS_OK(Model::Load(ORT_TSTR("../models/opset8/test_bvlc_alexnet/model.onnx"), model, nullptr,
*logger_));

// Check that the graph input/output/value_info have the same sizes as specified in the model file.
EXPECT_EQ(static_cast<size_t>(model_proto.graph().value_info_size()), model->MainGraph().GetValueInfo().size());
EXPECT_EQ(static_cast<size_t>(model_proto.graph().input_size()), model->MainGraph().GetInputs().size() + model->MainGraph().GetAllInitializedTensors().size());
EXPECT_EQ(static_cast<size_t>(model_proto.graph().output_size()), model->MainGraph().GetOutputs().size());
TestResolve(model->MainGraph());
}

TEST_P(ONNXModelsTest1, LoadFromFile) {
std::shared_ptr<Model> model;
ASSERT_STATUS_OK(Model::Load(GetModelFileName(), model, nullptr,
*logger_));
TestResolve(model->MainGraph());
}

TEST_P(ONNXModelsTest1, LoadFromProtobuf) {
using ::google::protobuf::io::CodedInputStream;
using ::google::protobuf::io::FileInputStream;
using ::google::protobuf::io::ZeroCopyInputStream;
int fd;
ASSERT_STATUS_OK(Env::Default().FileOpenRd(GetModelFileName(), fd));
ASSERT_TRUE(fd > 0);
std::unique_ptr<ZeroCopyInputStream> raw_input(new FileInputStream(fd));
std::unique_ptr<CodedInputStream> coded_input(new CodedInputStream(raw_input.get()));
coded_input->SetTotalBytesLimit(INT_MAX);
ModelProto model_proto;
bool result = model_proto.ParseFromCodedStream(coded_input.get());
coded_input.reset();
raw_input.reset();
ASSERT_TRUE(result);
ASSERT_STATUS_OK(Env::Default().FileClose(fd));
std::shared_ptr<Model> model;
ASSERT_STATUS_OK(Model::Load(std::move(model_proto), model, nullptr,
*logger_));
TestResolve(model->MainGraph());
}

#ifndef DISABLE_CONTRIB_OPS
INSTANTIATE_TEST_SUITE_P(ONNXModelsTests,
ONNXModelsTest1,
::testing::Values(ORT_TSTR("bvlc_alexnet"), ORT_TSTR("bvlc_googlenet"), ORT_TSTR("bvlc_reference_caffenet"), ORT_TSTR("bvlc_reference_rcnn_ilsvrc13"), ORT_TSTR("densenet121"), ORT_TSTR("emotion_ferplus"), ORT_TSTR("inception_v1"), ORT_TSTR("inception_v2"), ORT_TSTR("mnist"), ORT_TSTR("resnet50"), ORT_TSTR("shufflenet"), ORT_TSTR("squeezenet"), ORT_TSTR("tiny_yolov2"), ORT_TSTR("vgg19"), ORT_TSTR("zfnet512")));
#else
INSTANTIATE_TEST_SUITE_P(ONNXModelsTests,
ONNXModelsTest1,
::testing::Values(ORT_TSTR("bvlc_alexnet"), ORT_TSTR("bvlc_googlenet"), ORT_TSTR("bvlc_reference_caffenet"), ORT_TSTR("bvlc_reference_rcnn_ilsvrc13"), ORT_TSTR("densenet121"), ORT_TSTR("emotion_ferplus"), ORT_TSTR("inception_v1"), ORT_TSTR("inception_v2"), ORT_TSTR("mnist"), ORT_TSTR("resnet50"), ORT_TSTR("shufflenet"), ORT_TSTR("squeezenet"), ORT_TSTR("vgg19"), ORT_TSTR("zfnet512")));
#endif

#endif
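One pattern from the deleted tests above worth spelling out is how a large ModelProto is parsed through protobuf's stream API with the default 64MB message limit raised. A condensed sketch under the same ONNXModelsTest fixture (the path and test name are hypothetical; the protobuf and ORT calls are the ones from the deleted code):

TEST_F(ONNXModelsTest, HypotheticalLargeModelParse) {
  using ::google::protobuf::io::CodedInputStream;
  using ::google::protobuf::io::FileInputStream;
  int fd;
  ASSERT_STATUS_OK(Env::Default().FileOpenRd(ORT_TSTR("path/to/model.onnx"), fd));  // hypothetical path
  ASSERT_TRUE(fd > 0);
  auto raw_input = std::make_unique<FileInputStream>(fd);
  auto coded_input = std::make_unique<CodedInputStream>(raw_input.get());
  coded_input->SetTotalBytesLimit(INT_MAX);  // lift the 64MB default so large models parse
  ModelProto model_proto;
  ASSERT_TRUE(model_proto.ParseFromCodedStream(coded_input.get()));
  coded_input.reset();
  raw_input.reset();
  ASSERT_STATUS_OK(Env::Default().FileClose(fd));

  std::shared_ptr<Model> model;
  ASSERT_STATUS_OK(Model::Load(std::move(model_proto), model, nullptr, *logger_));
}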

// test a model that conforms to ONNX IR v4 where there are initializers that are not graph inputs.
// a NodeArg should be created for all initializers in this case.
48 changes: 0 additions & 48 deletions onnxruntime/test/optimizer/resnet50_fusion_test.cc
@@ -16,7 +16,6 @@

namespace onnxruntime {
namespace test {
// #define ORT_RUN_EXTERNAL_ONNX_TESTS
// #define MLAS_F16VEC_INTRINSICS_SUPPORTED

#define MODEL_FOLDER ORT_TSTR("testdata/transform/")
@@ -28,54 +27,7 @@ class ResNet50FusionTests : public ::testing::Test {
}
std::unique_ptr<logging::Logger> logger;
};
#if defined(ORT_RUN_EXTERNAL_ONNX_TESTS)
TEST_F(ResNet50FusionTests, FuseConvIntegrationTest) {
std::basic_string<ORTCHAR_T> fp32_model_path = ORT_TSTR("../models/opset10/Resnet50_Fusion_Testing/resnet50.onnx");
std::shared_ptr<Model> fp32_model;
std::basic_string<ORTCHAR_T> fp16_model_path = ORT_TSTR("../models/opset10/Resnet50_Fusion_Testing_fp16/resnet50.fp16.onnx");
std::shared_ptr<Model> fp16_model;
if (Model::Load(fp32_model_path, fp32_model, nullptr, *logger) != Status::OK()) {
GTEST_SKIP() << "Failed to load model: " << fp32_model_path;
}
if (Model::Load(fp16_model_path, fp16_model, nullptr, *logger) != Status::OK()) {
GTEST_SKIP() << "Failed to load model: " << fp16_model_path;
}
// ASSERT_STATUS_OK(Model::Load(fp32_model_path, fp32_model, nullptr, *logger));
Graph& fp32_graph = fp32_model->MainGraph();
for (auto& node : fp32_model->MainGraph().Nodes()) {
node.SetExecutionProviderType(kCpuExecutionProvider);
}
Graph& fp16_graph = fp16_model->MainGraph();
for (auto& node : fp16_model->MainGraph().Nodes()) {
node.SetExecutionProviderType(kCpuExecutionProvider);
}
// std::cout << "-------Op Counts Before Fusion---------" << std::endl;
std::map<std::string, int> fp32_op_count = CountOpsInGraph(fp32_graph);
std::map<std::string, int> fp16_op_count = CountOpsInGraph(fp16_graph);
for (auto& op : fp32_op_count) {
// std::cout << op.first << " " << op.second << std::endl;
ASSERT_EQ(op.second, fp16_op_count[op.first]);
}

onnxruntime::GraphTransformerManager graph_transformation_mgr_32{5};
ASSERT_STATUS_OK(graph_transformation_mgr_32.Register(std::make_unique<ConvActivationFusion>(), TransformerLevel::Level3));
ASSERT_STATUS_OK(graph_transformation_mgr_32.Register(std::make_unique<ConvAddActivationFusion>(), TransformerLevel::Level3));
ASSERT_STATUS_OK(graph_transformation_mgr_32.ApplyTransformers(fp32_graph, TransformerLevel::Level3, *logger));
ASSERT_STATUS_OK(Model::Save(*fp32_model, ORT_TSTR("resnet50_fused.onnx")));

onnxruntime::GraphTransformerManager graph_transformation_mgr_16{5};
ASSERT_STATUS_OK(graph_transformation_mgr_16.Register(std::make_unique<ConvActivationFusion>(), TransformerLevel::Level3));
ASSERT_STATUS_OK(graph_transformation_mgr_16.Register(std::make_unique<ConvAddActivationFusion>(), TransformerLevel::Level3));
ASSERT_STATUS_OK(graph_transformation_mgr_16.ApplyTransformers(fp16_graph, TransformerLevel::Level3, *logger));
ASSERT_STATUS_OK(Model::Save(*fp16_model, ORT_TSTR("resnet50_fp16_fused.onnx")));
// std::cout << "-------Op Counts After Fusion---------" << std::endl;
fp32_op_count = CountOpsInGraph(fp32_graph);
fp16_op_count = CountOpsInGraph(fp16_graph);
// for (auto& op : fp32_op_count) {
// ASSERT_EQ(op.second, fp16_op_count[op.first]);
// }
}
#endif // defined(ORT_RUN_EXTERNAL_ONNX_TESTS)
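The removed integration test drove the same fusion pipeline the remaining unit tests cover; a minimal sketch of that register-and-apply pattern under the same fixture (the model path and test name are hypothetical; the transformer and manager types are the ones from the deleted code):

TEST_F(ResNet50FusionTests, HypotheticalFusionSketch) {
  std::shared_ptr<Model> model;
  if (Model::Load(ORT_TSTR("path/to/conv_model.onnx"), model, nullptr, *logger) != Status::OK()) {  // hypothetical path
    GTEST_SKIP() << "model not available";
  }
  Graph& graph = model->MainGraph();
  for (auto& node : graph.Nodes()) {
    node.SetExecutionProviderType(kCpuExecutionProvider);  // the removed test assigned every node to the CPU EP first
  }
  onnxruntime::GraphTransformerManager transformer_mgr{5};
  ASSERT_STATUS_OK(transformer_mgr.Register(std::make_unique<ConvActivationFusion>(), TransformerLevel::Level3));
  ASSERT_STATUS_OK(transformer_mgr.Register(std::make_unique<ConvAddActivationFusion>(), TransformerLevel::Level3));
  ASSERT_STATUS_OK(transformer_mgr.ApplyTransformers(graph, TransformerLevel::Level3, *logger));
  std::map<std::string, int> op_count = CountOpsInGraph(graph);  // inspect op counts after fusion
  ASSERT_FALSE(op_count.empty());
}
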
TEST_F(ResNet50FusionTests, FuseConvAddReluUnitTest) {
constexpr const ORTCHAR_T* model_uri = MODEL_FOLDER "fusion/conv_add_relu_fp16.onnx";
std::shared_ptr<Model> p_model;