Skip to content
Merged
Changes from 1 commit
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
c2a14b8
Improve UT Coverage for TF 3x
zehao-intel Jun 6, 2024
40a1e2e
fix depthconv and sepconv
zehao-intel Jun 6, 2024
1cd24d2
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 6, 2024
eea3029
set qdq instancenorm as no cover
zehao-intel Jun 6, 2024
d1802b0
Merge branch 'zehao/utc' of https://github.com/intel/neural-compressor
zehao-intel Jun 6, 2024
09ee46c
fix test keras layers
zehao-intel Jun 6, 2024
1f4996b
fix test keras layers
zehao-intel Jun 6, 2024
42076c7
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 6, 2024
42ed3c8
fix test keras layer
zehao-intel Jun 6, 2024
84db7fd
fix tf.py
zehao-intel Jun 6, 2024
85d477a
remove set_tensor ut
zehao-intel Jun 6, 2024
148752f
improve keras layer and kl algo
zehao-intel Jun 6, 2024
917f192
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 6, 2024
f457216
update graph_converter
zehao-intel Jun 7, 2024
1edcc0c
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 7, 2024
8744714
Merge branch 'master' into zehao/utc
chensuyue Jun 12, 2024
5e43c59
collect tf new API coverage
chensuyue Jun 12, 2024
0a5003e
add pt omit path
chensuyue Jun 12, 2024
b3257cf
fix the issue
chensuyue Jun 12, 2024
90d4012
use sv param
zehao-intel Jun 13, 2024
c048cd8
run single case for pytest
chensuyue Jun 13, 2024
4a8152d
update test status show case
chensuyue Jun 13, 2024
dd7a4b5
add comments
chensuyue Jun 13, 2024
12f8628
for debug
chensuyue Jun 13, 2024
e38ae03
for test
chensuyue Jun 13, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
update graph_converter
Signed-off-by: zehao-intel <[email protected]>
  • Loading branch information
zehao-intel committed Jun 7, 2024
commit f4572167bbeed7b5572f2881b58763be8ef9529f
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ def _inference(self, model):
for idx, (inputs, labels) in enumerate(self.data_loader):
if len(input_tensor) == 1:
feed_dict = {}
if isinstance(inputs, dict) or isinstance(inputs, OrderedDict) or isinstance(inputs, UserDict):
if isinstance(inputs, dict) or isinstance(inputs, OrderedDict) or isinstance(inputs, UserDict): # pragma: no cover
for name in inputs:
for tensor in input_tensor:
pos = tensor.name.rfind(":")
Expand Down Expand Up @@ -341,7 +341,7 @@ def _inference_llm(self, model):
if idx >= self.calib_iteration:
break

def _check_tf_version(self):
def _check_tf_version(self): # pragma: no cover
"""Check if the installed tensorflow version is supported."""
is_supported_version = False
is_sprbase_version = False
Expand Down Expand Up @@ -520,7 +520,7 @@ def _get_fp32_print_node_names(self, specified_op_list):
for i in target_conv_op:
if specified_op_list and i not in specified_op_list:
continue
if node_name_mapping[i + "_eightbit_quantized_conv"].op == "QuantizedConv2DWithBiasSumAndRelu":
if node_name_mapping[i + "_eightbit_quantized_conv"].op == "QuantizedConv2DWithBiasSumAndRelu": # pragma: no cover
start_index = sorted_node_names.index(i)
for index, value in enumerate(sorted_node_names[start_index:]):
if (
Expand Down Expand Up @@ -549,7 +549,7 @@ def _get_fp32_print_node_names(self, specified_op_list):
self._fp32_model.graph_def = fp32_graph_def
return self._fp32_model

def _search_y_pattern_for_itex(self):
def _search_y_pattern_for_itex(self): # pragma: no cover
"""Search the Y pattern for itex and return the op name."""
g = GraphAnalyzer()
g.graph = self._fp32_model.graph_def
Expand Down