Merged
Changes from 1 commit
Commits
25 commits
c2a14b8
Improve UT Coverage for TF 3x
zehao-intel Jun 6, 2024
40a1e2e
fix depthconv and sepconv
zehao-intel Jun 6, 2024
1cd24d2
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 6, 2024
eea3029
set qdq instancenorm as no cover
zehao-intel Jun 6, 2024
d1802b0
Merge branch 'zehao/utc' of https://github.com/intel/neural-compresso…
zehao-intel Jun 6, 2024
09ee46c
fix test keras layers
zehao-intel Jun 6, 2024
1f4996b
fix test keras layers
zehao-intel Jun 6, 2024
42076c7
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 6, 2024
42ed3c8
fix test keras layer
zehao-intel Jun 6, 2024
84db7fd
fix tf.py
zehao-intel Jun 6, 2024
85d477a
remove set_tensor ut
zehao-intel Jun 6, 2024
148752f
improve keras layer and kl algo
zehao-intel Jun 6, 2024
917f192
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 6, 2024
f457216
update graph_converter
zehao-intel Jun 7, 2024
1edcc0c
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jun 7, 2024
8744714
Merge branch 'master' into zehao/utc
chensuyue Jun 12, 2024
5e43c59
collect tf new API coverage
chensuyue Jun 12, 2024
0a5003e
add pt omit path
chensuyue Jun 12, 2024
b3257cf
fix the issue
chensuyue Jun 12, 2024
90d4012
use sv param
zehao-intel Jun 13, 2024
c048cd8
run single case for pytest
chensuyue Jun 13, 2024
4a8152d
update test status show case
chensuyue Jun 13, 2024
dd7a4b5
add comments
chensuyue Jun 13, 2024
12f8628
for debug
chensuyue Jun 13, 2024
e38ae03
for test
chensuyue Jun 13, 2024
fix depthconv and sepconv
Signed-off-by: zehao-intel <[email protected]>
zehao-intel committed Jun 6, 2024
commit 40a1e2e88c5322add74d132d6ceaf9df1c1af75e
4 changes: 2 additions & 2 deletions neural_compressor/tensorflow/keras/layers/conv2d.py
@@ -30,7 +30,7 @@
 else:
     from keras.layers.convolutional.base_conv import Conv  # pylint: disable=E0401

-if version1_gte_version2(tf.__version__, "2.16.1"):
+if version1_gte_version2(tf.__version__, "2.16.1"):  # pragma: no cover

     class QConv2D(BaseConv):
         def __init__(
@@ -190,7 +190,7 @@ def get_config(self):

             return config

-else:  # pragma: no cover
+else:

     class QConv2D(Conv):
         def __init__(
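For context, "# pragma: no cover" is coverage.py's standard exclusion marker: placed on a branch header it excludes that whole branch from the coverage report, so this commit moves the marker onto the branch that the CI's pinned TensorFlow presumably does not execute. A minimal illustrative sketch of the gating pattern, assuming neural_compressor and TensorFlow are installed (the KERAS3 flag is a placeholder, not code from this PR):

import tensorflow as tf

from neural_compressor.tensorflow.utils import version1_gte_version2

if version1_gte_version2(tf.__version__, "2.16.1"):  # pragma: no cover
    # Keras 3 branch: only reachable on TF >= 2.16.1, so it is excluded
    # from the coverage report when CI runs an older TensorFlow.
    KERAS3 = True
else:
    # Keras 2 branch: the path the current CI actually measures.
    KERAS3 = False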
8 changes: 2 additions & 6 deletions neural_compressor/tensorflow/keras/layers/depthwise_conv2d.py
@@ -34,12 +34,10 @@

 if version1_gte_version2(tf.__version__, "2.16.1"):

-    class QDepthwiseConv2D(BaseDepthwiseConv):
+    class QDepthwiseConv2D(BaseDepthwiseConv):  # pragma: no cover
         def __init__(
             self,
             kernel_size,
-            min_value,
-            max_value,
             strides=(1, 1),
             padding="valid",
             depth_multiplier=1,
@@ -189,14 +187,12 @@ def get_config(self):

             return config

-else:  # pragma: no cover
+else:

     class QDepthwiseConv2D(DepthwiseConv):
         def __init__(
             self,
             kernel_size,
-            min_value,
-            max_value,
             strides=(1, 1),
             padding="valid",
             depth_multiplier=1,
8 changes: 2 additions & 6 deletions neural_compressor/tensorflow/keras/layers/separable_conv2d.py
@@ -32,15 +32,13 @@
     from keras.layers.convolutional.base_separable_conv import SeparableConv  # pylint: disable=E0401
     from keras.utils import conv_utils  # pylint: disable=E0401

-if version1_gte_version2(tf.__version__, "2.16.1"):
+if version1_gte_version2(tf.__version__, "2.16.1"):  # pragma: no cover

     class QSeparableConv2D(BaseSeparableConv):
         def __init__(
             self,
             filters,
             kernel_size,
-            min_value,
-            max_value,
             strides=(1, 1),
             padding="valid",
             data_format=None,
@@ -198,15 +196,13 @@ def get_config(self):

             return config

-else:  # pragma: no cover
+else:

     class QSeparableConv2D(SeparableConv):
         def __init__(
             self,
             filters,
             kernel_size,
-            min_value,
-            max_value,
             strides=(1, 1),
             padding="valid",
             data_format=None,
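The three layer files above all follow the same custom-Keras-layer pattern: a quantized subclass of the corresponding Keras base layer with extra constructor arguments, plus a get_config override (referenced in the hunk headers) so Keras can serialize and rebuild the layer. A generic toy sketch of that pattern, using a hypothetical ScaledDense rather than the actual QDepthwiseConv2D/QSeparableConv2D code:

import keras


class ScaledDense(keras.layers.Dense):
    """Toy layer used only to illustrate the get_config pattern."""

    def __init__(self, units, scale=1.0, **kwargs):
        super().__init__(units, **kwargs)
        self.scale = scale

    def call(self, inputs):
        return super().call(inputs) * self.scale

    def get_config(self):
        # Extra constructor arguments must be returned here, otherwise
        # keras.models.load_model cannot rebuild the layer from its config.
        config = super().get_config()
        config.update({"scale": self.scale})
        return config

Loading a saved model that contains such a layer typically requires passing custom_objects={"ScaledDense": ScaledDense} to keras.models.load_model.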
149 changes: 149 additions & 0 deletions test/3x/tensorflow/keras/test_layers.py
@@ -0,0 +1,149 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import os
import shutil
import unittest

import keras
import numpy as np
import tensorflow as tf

from neural_compressor.common import Logger
from neural_compressor.tensorflow.utils import version1_gte_version2

logger = Logger().get_logger()


def build_model():
    # Load MNIST dataset
    mnist = keras.datasets.mnist

    # 60000 images in train and 10000 images in test, but we don't need so much for ut
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
    train_images, train_labels = train_images[:1000], train_labels[:1000]
    test_images, test_labels = test_images[:200], test_labels[:200]

    # Normalize the input image so that each pixel value is between 0 to 1.
    train_images = train_images / 255.0
    test_images = test_images / 255.0

    # Define the model architecture.
    model = keras.Sequential(
        [
            keras.layers.InputLayer(input_shape=(28, 28)),
            keras.layers.Reshape(target_shape=(28, 28, 1)),
            keras.layers.DepthwiseConv2D(3, 3, activation="relu", name="conv2d"),
            keras.layers.MaxPooling2D(pool_size=(2, 2)),
            keras.layers.Flatten(),
            keras.layers.Dense(10, name="dense"),
        ]
    )
    # Train the digit classification model
    model.compile(
        optimizer="adam", loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"]
    )

    model.fit(
        train_images,
        train_labels,
        epochs=1,
        validation_split=0.1,
    )

    _, baseline_model_accuracy = model.evaluate(test_images, test_labels, verbose=0)

    print("Baseline test accuracy:", baseline_model_accuracy)
    if version1_gte_version2(tf.__version__, "2.16.1"):
        model.save("baseline_model.keras")
    else:
        model.save("baseline_model")


class Dataset(object):
    def __init__(self, batch_size=1):
        self.batch_size = batch_size
        mnist = keras.datasets.mnist
        (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
        train_images, train_labels = train_images[:1000], train_labels[:1000]
        test_images, test_labels = test_images[:200], test_labels[:200]
        # Normalize the input image so that each pixel value is between 0 to 1.
        self.train_images = train_images / 255.0
        self.test_images = test_images / 255.0
        self.train_labels = train_labels
        self.test_labels = test_labels

    def __len__(self):
        return len(self.test_images)

    def __getitem__(self, idx):
        return self.test_images[idx], self.test_labels[idx]


class MyDataloader:
    def __init__(self, dataset, batch_size=1):
        self.dataset = dataset
        self.batch_size = batch_size
        self.length = math.ceil(len(dataset) / self.batch_size)

    def __iter__(self):
        for _, (images, labels) in enumerate(self.dataset):
            images = np.expand_dims(images, axis=0)
            labels = np.expand_dims(labels, axis=0)
            yield (images, labels)

    def __len__(self):
        return self.length


class TestTF3xNewApi(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        build_model()
        os.environ["ITEX_ONEDNN_GRAPH"] = "1"
        self.fp32_model_path = (
            "baseline_model.keras" if version1_gte_version2(tf.__version__, "2.16.1") else "baseline_model"
        )

    @classmethod
    def tearDownClass(self):
        if self.fp32_model_path.endswith(".keras"):
            os.remove(self.fp32_model_path)
        else:
            shutil.rmtree(self.fp32_model_path, ignore_errors=True)
        os.environ["ITEX_ONEDNN_GRAPH"] = "0"

    def test_static_quant_from_dict_default(self):
        logger.info("test_static_quant_from_dict_default")
        from neural_compressor.tensorflow import quantize_model
        from neural_compressor.tensorflow.keras import get_default_static_quant_config

        calib_dataloader = MyDataloader(dataset=Dataset())
        fp32_model = keras.models.load_model(self.fp32_model_path)
        qmodel = quantize_model(fp32_model, get_default_static_quant_config(), calib_dataloader)
        self.assertIsNotNone(qmodel)

        for layer in qmodel.layers:
            if layer.name == "conv2d":
                self.assertEqual(layer.__class__.__name__, "QDepthwiseConv2D")
                break



if __name__ == "__main__":
    unittest.main()
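As a usage note, the new test module can also be driven through unittest directly. A minimal sketch (an assumed invocation, not part of this PR) that runs only the single case, in the spirit of the "run single case for pytest" commit:

# Hypothetical standalone runner: executes only test_static_quant_from_dict_default,
# assuming the current working directory is test/3x/tensorflow/keras.
import unittest

from test_layers import TestTF3xNewApi

suite = unittest.TestSuite()
suite.addTest(TestTF3xNewApi("test_static_quant_from_dict_default"))
unittest.TextTestRunner(verbosity=2).run(suite)

Equivalently, pytest can select the same case with its -k filter, e.g. pytest test/3x/tensorflow/keras/test_layers.py -k test_static_quant_from_dict_default.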