neural_compressor/torch/algorithms/weight_only/utility.py (5 additions, 1 deletion)

@@ -1105,7 +1105,11 @@ def __iter__(self):
             if not args:
                 yield kwargs
             elif not kwargs:
-                yield args
+                # case: a single positional input (e.g. one tensor), yield it unwrapped
+                if len(args) == 1:
+                    yield args[0]
+                else:
+                    yield args
             else:
                 yield args, kwargs

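When a captured call has exactly one positional argument, the old code yielded a one-element tuple, so downstream calibration code received `(tensor,)` rather than the tensor itself. A minimal, self-contained sketch of the new branch logic (`iter_samples` and the captured-call list are illustrative stand-ins, not part of the library):

```python
# Illustrative sketch only: iter_samples stands in for the patched
# __iter__ in weight_only/utility.py; the branch logic is the same.
import torch

def iter_samples(captured_calls):
    for args, kwargs in captured_calls:
        if not args:
            yield kwargs
        elif not kwargs:
            # new behavior: unwrap a single positional input
            if len(args) == 1:
                yield args[0]
            else:
                yield args
        else:
            yield args, kwargs

calls = [((torch.randn(2, 8),), {})]  # one tensor captured positionally
sample = next(iter_samples(calls))
print(torch.is_tensor(sample))  # True: a tensor, no longer a 1-tuple
```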
neural_compressor/torch/quantization/config.py (3 additions, 4 deletions)

@@ -723,7 +723,7 @@ def __init__(
         minmax_lr: float = None,
         low_gpu_mem_usage: bool = True,
         iters: int = 200,
-        seqlen: int = 2048,
+        seqlen: int = 512,
         n_samples: int = 512,
         sampler: str = "rand",
         seed: int = 42,
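Lowering the default calibration sequence length from 2048 to 512 should cut AutoRound calibration time and memory; callers can still pass the old value explicitly. A hedged construction sketch (assuming `AutoRoundConfig` is importable from `neural_compressor.torch.quantization`, where this file lives):

```python
# Hedged sketch: verify the import path against your installed
# neural_compressor version; only the seqlen default is the point here.
from neural_compressor.torch.quantization import AutoRoundConfig

cfg = AutoRoundConfig()                  # calibration seqlen now defaults to 512
cfg_long = AutoRoundConfig(seqlen=2048)  # opt back into the previous default
```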
@@ -1490,8 +1490,7 @@ def get_woq_tuning_config() -> list:
         the list of WOQ quant config.
     """
     RTN_G32ASYM = RTNConfig(use_sym=False, group_size=32)
+    AUTO_ROUND_CONFIG = AutoRoundConfig(use_sym=False, group_size=32)
     GPTQ_G32ASYM = GPTQConfig(use_sym=False, group_size=32)
-    GPTQ_G32ASYM_DISABLE_LAST_LINEAR = GPTQConfig(use_sym=False).set_local("*.lm_head", GPTQConfig(dtype="fp32"))
-    GPTQ_G128ASYM = GPTQConfig(group_size=128, use_sym=False)
     AWQ_G32ASYM = AWQConfig(use_sym=False, group_size=32)
-    return [RTN_G32ASYM, GPTQ_G32ASYM, GPTQ_G32ASYM_DISABLE_LAST_LINEAR, GPTQ_G128ASYM, AWQ_G32ASYM]
+    return [RTN_G32ASYM, AUTO_ROUND_CONFIG, GPTQ_G32ASYM, AWQ_G32ASYM]
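With `AUTO_ROUND_CONFIG` in the default WOQ tuning set, autotuning now tries AutoRound alongside RTN, GPTQ, and AWQ. A hedged end-to-end sketch (the `autotune`/`TuningConfig` usage follows my reading of the 3.x torch API and may differ between versions; the toy model and `eval_fn` are placeholders, and the calibration-based algorithms would additionally need real calibration data):

```python
# Hedged sketch: verify autotune/TuningConfig signatures against your
# installed neural_compressor release; the toy model and eval_fn are
# placeholders, not a real accuracy measurement.
import torch
from neural_compressor.torch.quantization import (
    TuningConfig,
    autotune,
    get_woq_tuning_config,
)

model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU())

def eval_fn(qmodel) -> float:
    # placeholder metric; in real use, return eval accuracy so autotune
    # can pick the first config meeting the accuracy criterion
    return 1.0

tune_config = TuningConfig(config_set=get_woq_tuning_config())
best_model = autotune(model=model, tune_config=tune_config, eval_fn=eval_fn)
```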