fix bug: correct the tuple order in the double-quant atol comparison in test_rtn.py
Signed-off-by: xin3he <[email protected]>
xin3he committed Jun 21, 2024
commit c7808e44d5ccc2390503a1517e1d8b01e6f4db14
2 changes: 1 addition & 1 deletion test/3x/torch/quantization/weight_only/test_rtn.py
@@ -241,7 +241,7 @@ def test_double_quant_params(self, dtype, double_quant_bits, double_quant_group_
        out = model(self.example_inputs)[0]
        atol_true = (out - self.q_label).amax()
        # compare atol, this case is an ideal case.
-       if not (dtype, double_quant_bits, double_quant_group_size) == (256, 6, "nf4"):
+       if not (dtype, double_quant_bits, double_quant_group_size) == ("nf4", 6, 256):
            assert (
                atol_false < atol_true
            ), "asym for double quant should have smaller atol because scales is bigger than zero, please double check."
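For context, a minimal standalone sketch of what this fix corrects (the sample values are assumed from the fixed comparison, i.e. one parametrized case of the test): the left-hand tuple is built in the order (dtype, double_quant_bits, double_quant_group_size), so the right-hand tuple must list its values in the same order. The old right-hand side (256, 6, "nf4") could never compare equal, so the atol assertion was never skipped for the ideal case.

# Hypothetical values mirroring the test's parametrize arguments.
dtype, double_quant_bits, double_quant_group_size = "nf4", 6, 256

# Old guard: the mismatched order never compares equal, so `not ... == ...` was always True.
assert (dtype, double_quant_bits, double_quant_group_size) != (256, 6, "nf4")

# Fixed guard: the ideal case is recognized, and the atol assertion is skipped for it.
assert (dtype, double_quant_bits, double_quant_group_size) == ("nf4", 6, 256)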