ut coverage
Signed-off-by: violetch24 <[email protected]>
violetch24 committed Jul 17, 2024
commit 841c22a18dfac9a7c30e27d73f916fdc03ea3af4
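This commit adds `# pragma: no cover` markers to XPU-only code paths so coverage.py drops them from the unit-test (UT) coverage report: CPU-only CI never executes these branches, so measuring them would only lower the reported number. A minimal sketch of the mechanism, with an illustrative function that is not part of this patch:

```python
# coverage.py's default exclude_lines setting matches "# pragma: no cover";
# a marked line, and any block it introduces, is omitted from the report.

def move_to_device(model, use_cpu: bool):
    if use_cpu:
        return model               # measured: exercised by CPU-only CI
    else:  # pragma: no cover
        return model.to("xpu")     # excluded: runs only when an Intel XPU is present
```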
@@ -79,7 +79,7 @@ def prepare(self, model, example_inputs, inplace=True, *args, **kwargs):
)
# update json file in ipex_config_path; map ipex op_name to pt op_name
self.user_cfg = cfg_to_qconfig(self.quant_config, cfgs, op_infos_from_cfgs, output_tensor_id_op_name)
-        else:
+        else:  # pragma: no cover
model = model.to("xpu")

model.eval()
@@ -109,7 +109,7 @@ def prepare(self, model, example_inputs, inplace=True, *args, **kwargs):
from torch.ao.quantization import QConfigMapping

static_qconfig = QConfigMapping().set_global(qconfig)
-        else:
+        else:  # pragma: no cover
static_qconfig = QConfig(
activation=MinMaxObserver.with_args(qscheme=torch.per_tensor_affine, dtype=torch.quint8),
weight=PerChannelMinMaxObserver.with_args(
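The `QConfig` construction above is cut off by the diff view. For reference, a hedged sketch of a complete static qconfig of this shape; the weight-observer arguments are assumptions, since the actual lines are truncated:

```python
import torch
from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver, QConfig

# Same shape as the qconfig being built above; the per-channel weight
# arguments below are assumed, not copied from the (truncated) patch.
static_qconfig = QConfig(
    activation=MinMaxObserver.with_args(qscheme=torch.per_tensor_affine, dtype=torch.quint8),
    weight=PerChannelMinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_channel_symmetric),
)
```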
neural_compressor/torch/algorithms/static_quant/utility.py (2 changes: 1 addition & 1 deletion)
@@ -163,7 +163,7 @@ def check_cfg_and_qconfig(user_cfg, cfgs, op_infos_from_cfgs, output_tensor_ids_
return cfgs, ori_user_cfg


-def generate_xpu_qconfig(tune_cfg):
+def generate_xpu_qconfig(tune_cfg):  # pragma: no cover
# qconfig observer & config constants for ipex-xpu
from torch.ao.quantization import HistogramObserver, MinMaxObserver, QConfig

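Note that because the pragma sits on the `def` line, coverage.py excludes the entire body of `generate_xpu_qconfig`, not just the signature: when an excluded line introduces a block, the whole block is left out of the report.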
neural_compressor/torch/quantization/config.py (2 changes: 1 addition & 1 deletion)
@@ -1126,7 +1126,7 @@ def get_model_info_for_ipex(model: torch.nn.Module, example_inputs) -> List[Tupl
_, _, _, _, model_info = get_quantizable_ops_recursively(model, example_inputs=example_inputs)
return model_info

-    def get_model_info_for_ipex_xpu(self, model: torch.nn.Module) -> List[Tuple[str, Callable]]:
+    def get_model_info_for_ipex_xpu(self, model: torch.nn.Module) -> List[Tuple[str, Callable]]:  # pragma: no cover
if self.model_info:
return self.model_info
else:
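The visible context shows a simple memoization idiom: the first call computes and caches `self.model_info`, and later calls return the cache. A hedged sketch of that idiom; the `_collect` helper is hypothetical, since the real else-branch is cut off by the diff:

```python
from typing import Callable, List, Tuple
import torch

class _XpuInfoSketch:
    """Illustrates the caching pattern in get_model_info_for_ipex_xpu."""

    def __init__(self):
        self.model_info: List[Tuple[str, Callable]] = []

    def _collect(self, model: torch.nn.Module) -> List[Tuple[str, Callable]]:
        # Hypothetical stand-in for the truncated else-branch.
        return [(name, type(mod)) for name, mod in model.named_modules()]

    def get_model_info_for_ipex_xpu(self, model: torch.nn.Module) -> List[Tuple[str, Callable]]:
        if self.model_info:
            return self.model_info  # later calls reuse the cached result
        self.model_info = self._collect(model)
        return self.model_info
```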