Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
c2a14b8
Improve UT Coverage for TF 3x
zehao-intel Jun 6, 2024
40a1e2e
fix depthconv and sepconv
zehao-intel Jun 6, 2024
1cd24d2
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 6, 2024
eea3029
set qdq instancenorm as no cover
zehao-intel Jun 6, 2024
d1802b0
Merge branch 'zehao/utc' of https://github.com/intel/neural-compresso…
zehao-intel Jun 6, 2024
09ee46c
fix test keras layers
zehao-intel Jun 6, 2024
1f4996b
fix test keras layers
zehao-intel Jun 6, 2024
42076c7
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 6, 2024
42ed3c8
fix test keras layer
zehao-intel Jun 6, 2024
84db7fd
fix tf.py
zehao-intel Jun 6, 2024
85d477a
remove set_tensor ut
zehao-intel Jun 6, 2024
148752f
improve keras layer and kl algo
zehao-intel Jun 6, 2024
917f192
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 6, 2024
f457216
update graph_converter
zehao-intel Jun 7, 2024
1edcc0c
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 7, 2024
8744714
Merge branch 'master' into zehao/utc
chensuyue Jun 12, 2024
5e43c59
collect tf new API coverage
chensuyue Jun 12, 2024
0a5003e
add pt omit path
chensuyue Jun 12, 2024
b3257cf
fix the issue
chensuyue Jun 12, 2024
90d4012
use sv param
zehao-intel Jun 13, 2024
c048cd8
run single case for pytest
chensuyue Jun 13, 2024
4a8152d
update test status show case
chensuyue Jun 13, 2024
dd7a4b5
add comments
chensuyue Jun 13, 2024
12f8628
for debug
chensuyue Jun 13, 2024
e38ae03
for test
chensuyue Jun 13, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
improve keras layer and kl algo
Signed-off-by: zehao-intel <[email protected]>
  • Loading branch information
zehao-intel committed Jun 6, 2024
commit 148752f877bc1e3d8a226f195acc9b1037e9f843
43 changes: 9 additions & 34 deletions neural_compressor/tensorflow/keras/layers/conv2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -354,40 +354,15 @@ def get_config(self):
def initialize_int8_conv2d(fp32_layer, q_config):
kwargs = fp32_layer.get_config()

if "name" in kwargs:
del kwargs["name"]
if "filters" in kwargs:
del kwargs["filters"]
if "kernel_size" in kwargs:
del kwargs["kernel_size"]
if "strides" in kwargs:
del kwargs["strides"]
if "padding" in kwargs:
del kwargs["padding"]
if "data_format" in kwargs:
del kwargs["data_format"]
if "dilation_rate" in kwargs:
del kwargs["dilation_rate"]
if "groups" in kwargs:
del kwargs["groups"]
if "activation" in kwargs:
del kwargs["activation"]
if "use_bias" in kwargs:
del kwargs["use_bias"]
if "kernel_initializer" in kwargs:
del kwargs["kernel_initializer"]
if "bias_initializer" in kwargs:
del kwargs["bias_initializer"]
if "kernel_regularizer" in kwargs:
del kwargs["kernel_regularizer"]
if "activity_regularizer" in kwargs:
del kwargs["activity_regularizer"]
if "bias_regularizer" in kwargs:
del kwargs["bias_regularizer"]
if "kernel_constraint" in kwargs:
del kwargs["kernel_constraint"]
if "bias_constraint" in kwargs:
del kwargs["bias_constraint"]
param_list = ["name", "filters", "kernel_size", "strides", "padding",
"data_format", "dilation_rate", "groups", "activation",
"use_bias", "kernel_initializer", "bias_initializer",
"kernel_regularizer", "activity_regularizer", "bias_regularizer",
"kernel_constraint", "bias_constraint"
]
for p in param_list: # pragma: no cover
if p in kwargs:
del kwargs[p]

return QConv2D(
name=fp32_layer.name,
Expand Down
31 changes: 8 additions & 23 deletions neural_compressor/tensorflow/keras/layers/dense.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,29 +170,14 @@ def get_config(self):
def initialize_int8_dense(fp32_layer, q_config):
kwargs = fp32_layer.get_config()

if "name" in kwargs:
del kwargs["name"]
if "units" in kwargs:
del kwargs["units"]
if "activation" in kwargs:
del kwargs["activation"]
if "use_bias" in kwargs:
del kwargs["use_bias"]
if "kernel_initializer" in kwargs:
del kwargs["kernel_initializer"]
if "bias_initializer" in kwargs:
del kwargs["bias_initializer"]
if "kernel_regularizer" in kwargs:
del kwargs["kernel_regularizer"]
if "activity_regularizer" in kwargs:
del kwargs["activity_regularizer"]
if "bias_regularizer" in kwargs:
del kwargs["bias_regularizer"]
if "kernel_constraint" in kwargs:
del kwargs["kernel_constraint"]
if "bias_constraint" in kwargs:
del kwargs["bias_constraint"]

param_list = ["name", "units", "activation", "use_bias", "kernel_initializer",
"bias_initializer", "kernel_regularizer", "activity_regularizer",
"bias_regularizer", "kernel_constraint", "bias_constraint"
]
for p in param_list: # pragma: no cover
if p in kwargs:
del kwargs[p]

q_layer = QDense(
name=fp32_layer.name,
units=fp32_layer.units,
Expand Down
45 changes: 9 additions & 36 deletions neural_compressor/tensorflow/keras/layers/depthwise_conv2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -372,42 +372,15 @@ def initialize_int8_depthwise_conv2d(fp32_layer, q_config):
kwargs = fp32_layer.get_config()
q_name = fp32_layer.name

if "name" in kwargs:
del kwargs["name"]
if "kernel_size" in kwargs:
del kwargs["kernel_size"]
if "strides" in kwargs:
del kwargs["strides"]
if "padding" in kwargs:
del kwargs["padding"]
if "depth_multiplier" in kwargs:
del kwargs["depth_multiplier"]
if "data_format" in kwargs:
del kwargs["data_format"]
if "dilation_rate" in kwargs:
del kwargs["dilation_rate"]
if "activation" in kwargs:
del kwargs["activation"]
if "use_bias" in kwargs:
del kwargs["use_bias"]
if "depthwise_initializer" in kwargs:
del kwargs["depthwise_initializer"]
if "bias_initializer" in kwargs:
del kwargs["bias_initializer"]
if "depthwise_regularizer" in kwargs:
del kwargs["depthwise_regularizer"]
if "activity_regularizer" in kwargs:
del kwargs["activity_regularizer"]
if "bias_regularizer" in kwargs:
del kwargs["bias_regularizer"]
if "depthwise_constraint" in kwargs:
del kwargs["depthwise_constraint"]
if "bias_constraint" in kwargs:
del kwargs["bias_constraint"]
if "min_value" in kwargs:
del kwargs["min_value"]
if "max_value" in kwargs:
del kwargs["max_value"]
param_list = ["name", "kernel_size", "strides", "padding", "depth_multiplier",
"data_format", "dilation_rate", "activation",
"use_bias", "depthwise_initializer", "bias_initializer",
"depthwise_regularizer", "activity_regularizer", "bias_regularizer",
"depthwise_constraint", "bias_constraint"
]
for p in param_list: # pragma: no cover
if p in kwargs:
del kwargs[p]

return QDepthwiseConv2D(
name=q_name,
Expand Down
28 changes: 8 additions & 20 deletions neural_compressor/tensorflow/keras/layers/pool2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,16 +215,10 @@ def get_config(self):
def initialize_int8_avgpool(fp32_layer, q_config):
kwargs = fp32_layer.get_config()

if "name" in kwargs:
del kwargs["name"]
if "pool_size" in kwargs:
del kwargs["pool_size"]
if "strides" in kwargs:
del kwargs["strides"]
if "padding" in kwargs:
del kwargs["padding"]
if "data_format" in kwargs:
del kwargs["data_format"]
param_list = ["name", "pool_size", "strides", "padding", "data_format", ]
for p in param_list: # pragma: no cover
if p in kwargs:
del kwargs[p]

q_layer = QAvgPool2D(
name=fp32_layer.name,
Expand All @@ -243,16 +237,10 @@ def initialize_int8_avgpool(fp32_layer, q_config):
def initialize_int8_maxpool(fp32_layer, q_config):
kwargs = fp32_layer.get_config()

if "name" in kwargs:
del kwargs["name"]
if "pool_size" in kwargs:
del kwargs["pool_size"]
if "strides" in kwargs:
del kwargs["strides"]
if "padding" in kwargs:
del kwargs["padding"]
if "data_format" in kwargs:
del kwargs["data_format"]
param_list = ["name", "pool_size", "strides", "padding", "data_format", ]
for p in param_list: # pragma: no cover
if p in kwargs:
del kwargs[p]

q_layer = QMaxPool2D(
name=fp32_layer.name,
Expand Down
54 changes: 10 additions & 44 deletions neural_compressor/tensorflow/keras/layers/separable_conv2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -364,50 +364,16 @@ def get_config(self):
def initialize_int8_separable_conv2d(fp32_layer, q_config):
kwargs = fp32_layer.get_config()

if "name" in kwargs:
del kwargs["name"]
if "filters" in kwargs:
del kwargs["filters"]
if "kernel_size" in kwargs:
del kwargs["kernel_size"]
if "strides" in kwargs:
del kwargs["strides"]
if "padding" in kwargs:
del kwargs["padding"]
if "data_format" in kwargs:
del kwargs["data_format"]
if "dilation_rate" in kwargs:
del kwargs["dilation_rate"]
if "depth_multiplier" in kwargs:
del kwargs["depth_multiplier"]
if "activation" in kwargs:
del kwargs["activation"]
if "use_bias" in kwargs:
del kwargs["use_bias"]
if "depthwise_initializer" in kwargs:
del kwargs["depthwise_initializer"]
if "pointwise_initializer" in kwargs:
del kwargs["pointwise_initializer"]
if "bias_initializer" in kwargs:
del kwargs["bias_initializer"]
if "depthwise_regularizer" in kwargs:
del kwargs["depthwise_regularizer"]
if "pointwise_regularizer" in kwargs:
del kwargs["pointwise_regularizer"]
if "activity_regularizer" in kwargs:
del kwargs["activity_regularizer"]
if "bias_regularizer" in kwargs:
del kwargs["bias_regularizer"]
if "depthwise_constraint" in kwargs:
del kwargs["depthwise_constraint"]
if "pointwise_constraint" in kwargs:
del kwargs["pointwise_constraint"]
if "bias_constraint" in kwargs:
del kwargs["bias_constraint"]
if "min_value" in kwargs:
del kwargs["min_value"]
if "max_value" in kwargs:
del kwargs["max_value"]
param_list = ["name", "filters", "kernel_size", "strides", "padding",
"data_format", "dilation_rate", "depth_multiplier", "activation",
"use_bias", "depthwise_initializer", "bias_initializer", "pointwise_initializer",
"depthwise_regularizer", "activity_regularizer", "bias_regularizer",
"pointwise_regularizer", "depthwise_constraint", "bias_constraint",
"pointwise_constraint",
]
for p in param_list: # pragma: no cover
if p in kwargs:
del kwargs[p]

return QSeparableConv2D(
name=fp32_layer.name,
Expand Down
2 changes: 1 addition & 1 deletion test/3x/tensorflow/keras/test_layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def build_model2():
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.SeparableConv2D(3, 3, activation="relu"),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.AveragePooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10, name="dense"),
]
Expand Down
29 changes: 29 additions & 0 deletions test/3x/tensorflow/test_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,35 @@ def test_static_quant_from_dict_advance(self):

self.assertEqual(conv2d_quantized, False)

def test_static_quant_from_dict_advance2(self):
logger.info("test_static_quant_from_dict_advance2")
from neural_compressor.tensorflow import quantize_model
from neural_compressor.tensorflow.utils import DummyDataset

dataset = DummyDataset(shape=(100, 32, 32, 3), label=True)
calib_dataloader = MyDataLoader(dataset=dataset)
fp32_model = self.graph
quant_config = {
"static_quant": {
"global": {
"weight_dtype": "int8",
"weight_sym": True,
"weight_granularity": "per_channel",
"act_dtype": "int8",
"act_sym": True,
"act_granularity": "per_channel",
},
"local": {
"conv1": {
"weight_algorithm": "kl",
"act_algorithm": "kl",
}
},
}
}
qmodel = quantize_model(fp32_model, quant_config, calib_dataloader)
self.assertIsNotNone(qmodel)

def test_static_quant_from_class_advance(self):
logger.info("test_static_quant_from_class_advance")
from neural_compressor.tensorflow import StaticQuantConfig, quantize_model
Expand Down