From d5d55ab50ec01614095bfff34b434817e2ff8eab Mon Sep 17 00:00:00 2001 From: Zhiyu Cheng Date: Wed, 11 Feb 2026 22:14:55 -0800 Subject: [PATCH 1/7] support qwen3.5 MOE PTQ Signed-off-by: Zhiyu Cheng --- modelopt/torch/export/layer_utils.py | 7 +- modelopt/torch/export/unified_export_hf.py | 48 +++++-- .../torch/quantization/plugins/huggingface.py | 120 ++++++++++++++++++ modelopt/torch/utils/dataset_utils.py | 4 +- 4 files changed, 168 insertions(+), 11 deletions(-) diff --git a/modelopt/torch/export/layer_utils.py b/modelopt/torch/export/layer_utils.py index 9346e074b..8f3b364df 100755 --- a/modelopt/torch/export/layer_utils.py +++ b/modelopt/torch/export/layer_utils.py @@ -339,6 +339,7 @@ def is_moe(module: nn.Module) -> bool: "Qwen2MoeSparseMoeBlock".lower(), "Qwen3MoeSparseMoeBlock".lower(), "Qwen3NextSparseMoeBlock".lower(), + "Qwen3_5MoeSparseMoeBlock".lower(), ] ) @@ -999,6 +1000,7 @@ def module_match_name_list(module, name_list): "Qwen2MoeSparseMoeBlock", "Qwen3MoeSparseMoeBlock", "Qwen3NextSparseMoeBlock", + "Qwen3_5MoeSparseMoeBlock", "DeepseekMoE", ], ): @@ -1134,7 +1136,10 @@ def set_expert_quantizer_amax( # Apply target amax to quantizers that need it for module, attr_name, quantizer in all_quantizers: # Check if quantizer needs amax (use property for consistency) - needs_amax = getattr(quantizer, "amax", None) is None + # Also treat zero amax as needing recalibration — a zero amax is never valid + # and indicates the quantizer wasn't activated during calibration + amax = getattr(quantizer, "amax", None) + needs_amax = amax is None or (isinstance(amax, torch.Tensor) and torch.all(amax == 0)) # Skip dynamic quantizers for input quantizers if "input_quantizer" in attr_name and getattr(quantizer, "_dynamic", False): diff --git a/modelopt/torch/export/unified_export_hf.py b/modelopt/torch/export/unified_export_hf.py index 5703f4515..ceccb2329 100644 --- a/modelopt/torch/export/unified_export_hf.py +++ b/modelopt/torch/export/unified_export_hf.py @@ -574,7 +574,7 @@ def _process_quantized_modules( """ fsdp_module_to_reshard = None - for _, sub_module in model.named_modules(): + for name, sub_module in model.named_modules(): # Optimization to perform resharding only once per decoder layer to avoid extra communication overhead if isinstance(sub_module, FSDPModule): # Every time we encounter a new FSDPModule, the previous decoder layer is fully processed. 
@@ -593,8 +593,14 @@ def _process_quantized_modules( sub_module.unpack_weight() if get_quantization_format(sub_module) != QUANTIZATION_NONE: if is_quantlinear(sub_module): - with fsdp2_aware_weight_update(model, sub_module, reshard=False): - _export_quantized_weight(sub_module, dtype) + try: + with fsdp2_aware_weight_update(model, sub_module, reshard=False): + _export_quantized_weight(sub_module, dtype) + except AssertionError as e: + raise AssertionError( + f"Failed to export module '{name}' " + f"(type={type(sub_module).__name__}): {e}" + ) from e elif ( "Llama4TextExperts" in type(sub_module).__name__ or "GptOssExperts" in type(sub_module).__name__ @@ -670,6 +676,16 @@ def _export_transformers_checkpoint( modules=[linear_module], quantizer_attrs=["input_quantizer"], ) + elif "Qwen3_5MoeExperts" in type(sub_module.experts).__name__: + # Handle Qwen3.5 MoE experts which use gate_proj/up_proj/down_proj ModuleLists + for expert_linear_name in ["gate_proj", "up_proj", "down_proj"]: + if hasattr(sub_module.experts, expert_linear_name): + linear_modulelist = getattr(sub_module.experts, expert_linear_name) + if hasattr(linear_modulelist, "__iter__"): + set_expert_quantizer_amax( + modules=list(linear_modulelist), + quantizer_attrs=["input_quantizer"], + ) elif isinstance(sub_module.experts, collections.abc.Iterable): # For other MoE models (like Mixtral) with iterable experts try: @@ -1013,11 +1029,27 @@ def export_hf_checkpoint( model.hf_quantizer = None # Save model - model.save_pretrained( - export_dir, - state_dict={**post_state_dict, **(extra_state_dict or {})}, - save_modelopt_state=save_modelopt_state, - ) + # Temporarily disable revert_weight_conversion if available — it doesn't handle + # quantized state dicts (scalar scale tensors have 0 dimensions, causing IndexError). 
+ _patched_revert = False + try: + import transformers.core_model_loading as _cml + + _original_revert = _cml.revert_weight_conversion + _cml.revert_weight_conversion = lambda model, state_dict: state_dict + _patched_revert = True + except (ImportError, AttributeError): + pass + + try: + model.save_pretrained( + export_dir, + state_dict={**post_state_dict, **(extra_state_dict or {})}, + save_modelopt_state=save_modelopt_state, + ) + finally: + if _patched_revert: + _cml.revert_weight_conversion = _original_revert original_config = f"{export_dir}/config.json" config_data = {} diff --git a/modelopt/torch/quantization/plugins/huggingface.py b/modelopt/torch/quantization/plugins/huggingface.py index a29d7c754..40272d2b3 100644 --- a/modelopt/torch/quantization/plugins/huggingface.py +++ b/modelopt/torch/quantization/plugins/huggingface.py @@ -653,6 +653,86 @@ def forward( return next_states +class _QuantQwen3_5MoeExperts(QuantModule): + def _setup(self): + """Modify the Qwen3_5MoeExperts by using nn.Linear layers.""" + from accelerate import init_empty_weights + + dtype, device = self.gate_up_proj.dtype, self.gate_up_proj.device + + def _copy_weight(module, weight): + module.to_empty(device=device) + with torch.no_grad(): + module.weight.data = weight.detach().data.to(dtype=dtype, device=device) + + expert_dim = self.intermediate_dim + + with init_empty_weights(): + gate_proj = nn.ModuleList( + [ + nn.Linear(self.hidden_dim, expert_dim, bias=False) + for _ in range(self.num_experts) + ] + ) + up_proj = nn.ModuleList( + [ + nn.Linear(self.hidden_dim, expert_dim, bias=False) + for _ in range(self.num_experts) + ] + ) + down_proj = nn.ModuleList( + [ + nn.Linear(expert_dim, self.hidden_dim, bias=False) + for _ in range(self.num_experts) + ] + ) + + for idx in range(self.num_experts): + # gate_up_proj shape: (num_experts, 2*intermediate_dim, hidden_dim) + # Already in (out_features, in_features) format, no transpose needed + _copy_weight(gate_proj[idx], self.gate_up_proj[idx, :expert_dim, :]) + _copy_weight(up_proj[idx], self.gate_up_proj[idx, expert_dim:, :]) + # down_proj shape: (num_experts, hidden_dim, intermediate_dim) + # Already in (out_features, in_features) format + _copy_weight(down_proj[idx], self.down_proj[idx]) + + delattr(self, "gate_up_proj") + delattr(self, "down_proj") + self.gate_proj = gate_proj + self.up_proj = up_proj + self.down_proj = down_proj + + def forward( + self, + hidden_states: torch.Tensor, + top_k_index: torch.Tensor, + top_k_weights: torch.Tensor, + ) -> torch.Tensor: + final_hidden_states = torch.zeros_like(hidden_states) + with torch.no_grad(): + expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts) + expert_mask = expert_mask.permute(2, 1, 0) + expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero() + for expert_idx in expert_hit: + expert_idx = expert_idx[0] + if expert_idx == self.num_experts: + continue + with torch.no_grad(): + top_k_pos, token_idx = torch.where(expert_mask[expert_idx]) + current_state = hidden_states[token_idx] + gate = self.gate_proj[expert_idx](current_state) + up = self.up_proj[expert_idx](current_state) + current_hidden_states = self.act_fn(gate) * up + current_hidden_states = self.down_proj[expert_idx](current_hidden_states) + current_hidden_states = ( + current_hidden_states * top_k_weights[token_idx, top_k_pos, None] + ) + final_hidden_states.index_add_( + 0, token_idx, current_hidden_states.to(final_hidden_states.dtype) + ) + return final_hidden_states + + class 
_QuantDbrxFFN(_QuantSparseMoe): @property def num_experts(self): @@ -797,6 +877,46 @@ def unpack_weight(self): pass +class _QuantQwen3_5MoeSparseMoeBlock(_QuantSparseMoe): + """Qwen3.5 MoE stores top_k/num_experts in the router (self.gate), not as direct attributes. + + We override forward instead of just bridging attributes because the router (self.gate) + uses its own top_k internally for routing decisions. We must modify self.gate.top_k + directly so all experts see calibration data. + """ + + def _setup(self): + self.num_experts = self.experts.num_experts + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + if any(getattr(m, "_if_calib", False) for m in self.experts.modules()): + # Force all tokens to all experts during calibration + original_top_k = self.gate.top_k + self.gate.top_k = self.num_experts + super(_QuantSparseMoe, self).forward(hidden_states) + self.gate.top_k = original_top_k + return super(_QuantSparseMoe, self).forward(hidden_states) + + +try: + from transformers.models.qwen3_5_moe.modeling_qwen3_5_moe import ( + Qwen3_5MoeExperts, + Qwen3_5MoeSparseMoeBlock, + ) + + if Qwen3_5MoeSparseMoeBlock not in QuantModuleRegistry: + QuantModuleRegistry.register({Qwen3_5MoeSparseMoeBlock: "hf.Qwen3_5MoeSparseMoeBlock"})( + _QuantQwen3_5MoeSparseMoeBlock + ) + + if Qwen3_5MoeExperts not in QuantModuleRegistry: + QuantModuleRegistry.register({Qwen3_5MoeExperts: "hf.Qwen3_5MoeExperts"})( + _QuantQwen3_5MoeExperts + ) +except ImportError: + pass + + class _QuantGptOssExperts(_QuantFunctionalMixin): """Quantized wrapper for `transformers.GptOssExperts`. diff --git a/modelopt/torch/utils/dataset_utils.py b/modelopt/torch/utils/dataset_utils.py index 16bff49c2..7718b2126 100644 --- a/modelopt/torch/utils/dataset_utils.py +++ b/modelopt/torch/utils/dataset_utils.py @@ -224,7 +224,7 @@ def get_dataset_dataloader( A instance of dataloader. """ assert tokenizer is not None, "Please provide a tokenizer." - # batch_encode_plus will modify the tokenizer in place, so we need to clone it. + # Tokenizer encoding may modify the tokenizer in place, so we need to clone it. tokenizer = copy.deepcopy(tokenizer) if tokenizer.padding_side != "left": @@ -247,7 +247,7 @@ def get_dataset_dataloader( samples = get_dataset_samples(ds_name, num_sample) all_samples.extend(samples) - batch_encoded = tokenizer.batch_encode_plus( + batch_encoded = tokenizer( all_samples, return_tensors="pt", padding=True, From 5079b81a71b7a01279cea9301d6f799638967606 Mon Sep 17 00:00:00 2001 From: Zhiyu Cheng Date: Wed, 11 Feb 2026 22:27:43 -0800 Subject: [PATCH 2/7] update Signed-off-by: Zhiyu Cheng --- modelopt/torch/export/layer_utils.py | 2 +- modelopt/torch/export/unified_export_hf.py | 28 ++++++++++++++-------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/modelopt/torch/export/layer_utils.py b/modelopt/torch/export/layer_utils.py index 8f3b364df..2eea6c052 100755 --- a/modelopt/torch/export/layer_utils.py +++ b/modelopt/torch/export/layer_utils.py @@ -1745,7 +1745,7 @@ def _split_fused_qkv_weight_and_scaling( qkv_in = weight.shape[-1] if weight_dim > 1 else 1 - num_kv_heads = num_kv_heads if num_kv_heads else num_heads + num_kv_heads = num_kv_heads or num_heads assert num_heads % num_kv_heads == 0, ( f"num_heads({num_heads}) must be divisible by num_kv_heads({num_kv_heads}))." 
) diff --git a/modelopt/torch/export/unified_export_hf.py b/modelopt/torch/export/unified_export_hf.py index ceccb2329..42c32982a 100644 --- a/modelopt/torch/export/unified_export_hf.py +++ b/modelopt/torch/export/unified_export_hf.py @@ -1031,15 +1031,23 @@ def export_hf_checkpoint( # Save model # Temporarily disable revert_weight_conversion if available — it doesn't handle # quantized state dicts (scalar scale tensors have 0 dimensions, causing IndexError). - _patched_revert = False - try: - import transformers.core_model_loading as _cml + # We must patch both the source module and the importing module since + # modeling_utils does `from core_model_loading import revert_weight_conversion`. + _patches = [] + _noop = lambda model, state_dict: state_dict + for _mod_path in [ + "transformers.core_model_loading", + "transformers.modeling_utils", + ]: + try: + import importlib - _original_revert = _cml.revert_weight_conversion - _cml.revert_weight_conversion = lambda model, state_dict: state_dict - _patched_revert = True - except (ImportError, AttributeError): - pass + _mod = importlib.import_module(_mod_path) + if hasattr(_mod, "revert_weight_conversion"): + _patches.append((_mod, getattr(_mod, "revert_weight_conversion"))) + setattr(_mod, "revert_weight_conversion", _noop) + except (ImportError, AttributeError): + pass try: model.save_pretrained( @@ -1048,8 +1056,8 @@ def export_hf_checkpoint( save_modelopt_state=save_modelopt_state, ) finally: - if _patched_revert: - _cml.revert_weight_conversion = _original_revert + for _mod, _original in _patches: + _mod.revert_weight_conversion = _original original_config = f"{export_dir}/config.json" config_data = {} From 199cd79bfa6707989ed0b428a492979268b72a21 Mon Sep 17 00:00:00 2001 From: Zhiyu Cheng Date: Fri, 13 Feb 2026 16:58:27 -0800 Subject: [PATCH 3/7] swap order to export original tokenizer files Signed-off-by: Zhiyu Cheng --- examples/llm_ptq/hf_ptq.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/llm_ptq/hf_ptq.py b/examples/llm_ptq/hf_ptq.py index d9a6ca893..fc1043c05 100755 --- a/examples/llm_ptq/hf_ptq.py +++ b/examples/llm_ptq/hf_ptq.py @@ -639,9 +639,6 @@ def export_quantized( extra_state_dict=mtp_state_dict, ) - # Copy custom model files (Python files and JSON configs) if trust_remote_code is used - copy_custom_model_files(args.pyt_ckpt_path, export_path, args.trust_remote_code) - # Restore default padding and export the tokenizer as well. if tokenizer is not None: tokenizer.padding_side = default_padding_side @@ -649,6 +646,12 @@ def export_quantized( tokenizer.pad_token = default_pad_token tokenizer.save_pretrained(export_path) + # Copy custom model files (Python files and JSON configs) if trust_remote_code is used. + # This must run AFTER tokenizer.save_pretrained() so original tokenizer files + # from the source checkpoint take precedence over regenerated ones (which may + # differ in format due to newer transformers versions). + copy_custom_model_files(args.pyt_ckpt_path, export_path, args.trust_remote_code) + end_time = time.time() print( f"Quantized model exported to: {export_path}. 
Total time used {end_time - start_time}s" From f1376089305bfea1404dce949893f2d8bae9bfa0 Mon Sep 17 00:00:00 2001 From: Zhiyu Cheng Date: Fri, 13 Feb 2026 16:59:07 -0800 Subject: [PATCH 4/7] swap order to export original tokenizer files Signed-off-by: Zhiyu Cheng --- modelopt/torch/export/unified_export_hf.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modelopt/torch/export/unified_export_hf.py b/modelopt/torch/export/unified_export_hf.py index 42c32982a..5935e018d 100644 --- a/modelopt/torch/export/unified_export_hf.py +++ b/modelopt/torch/export/unified_export_hf.py @@ -598,8 +598,7 @@ def _process_quantized_modules( _export_quantized_weight(sub_module, dtype) except AssertionError as e: raise AssertionError( - f"Failed to export module '{name}' " - f"(type={type(sub_module).__name__}): {e}" + f"Failed to export module '{name}' (type={type(sub_module).__name__}): {e}" ) from e elif ( "Llama4TextExperts" in type(sub_module).__name__ From f0326e59c832386e13aba62a4527ae2673d566fb Mon Sep 17 00:00:00 2001 From: Zhiyu Cheng Date: Mon, 16 Feb 2026 17:13:34 -0800 Subject: [PATCH 5/7] adopt *experts.{id}.* naming pattern Signed-off-by: Zhiyu Cheng --- modelopt/torch/export/unified_export_hf.py | 10 --- .../torch/quantization/plugins/huggingface.py | 69 ++++++++++++------- 2 files changed, 45 insertions(+), 34 deletions(-) diff --git a/modelopt/torch/export/unified_export_hf.py b/modelopt/torch/export/unified_export_hf.py index 5935e018d..dd069cacf 100644 --- a/modelopt/torch/export/unified_export_hf.py +++ b/modelopt/torch/export/unified_export_hf.py @@ -675,16 +675,6 @@ def _export_transformers_checkpoint( modules=[linear_module], quantizer_attrs=["input_quantizer"], ) - elif "Qwen3_5MoeExperts" in type(sub_module.experts).__name__: - # Handle Qwen3.5 MoE experts which use gate_proj/up_proj/down_proj ModuleLists - for expert_linear_name in ["gate_proj", "up_proj", "down_proj"]: - if hasattr(sub_module.experts, expert_linear_name): - linear_modulelist = getattr(sub_module.experts, expert_linear_name) - if hasattr(linear_modulelist, "__iter__"): - set_expert_quantizer_amax( - modules=list(linear_modulelist), - quantizer_attrs=["input_quantizer"], - ) elif isinstance(sub_module.experts, collections.abc.Iterable): # For other MoE models (like Mixtral) with iterable experts try: diff --git a/modelopt/torch/quantization/plugins/huggingface.py b/modelopt/torch/quantization/plugins/huggingface.py index 40272d2b3..94e8a67ec 100644 --- a/modelopt/torch/quantization/plugins/huggingface.py +++ b/modelopt/torch/quantization/plugins/huggingface.py @@ -653,9 +653,27 @@ def forward( return next_states +class _Qwen3_5MoeExpertModule(nn.Module): + """Container for a single Qwen3.5 MoE expert's linear layers. + + Produces the naming pattern: experts.{id}.gate_proj.weight + (consistent with standard Qwen3 MoE per-expert module structure). + """ + + def __init__(self, hidden_dim: int, expert_dim: int): + super().__init__() + self.gate_proj = nn.Linear(hidden_dim, expert_dim, bias=False) + self.up_proj = nn.Linear(hidden_dim, expert_dim, bias=False) + self.down_proj = nn.Linear(expert_dim, hidden_dim, bias=False) + + class _QuantQwen3_5MoeExperts(QuantModule): def _setup(self): - """Modify the Qwen3_5MoeExperts by using nn.Linear layers.""" + """Modify the Qwen3_5MoeExperts by using per-expert nn.Module containers. + + This produces the naming pattern: experts.{id}.gate_proj.weight + (consistent with standard Qwen3 MoE). 
+ """ from accelerate import init_empty_weights dtype, device = self.gate_up_proj.dtype, self.gate_up_proj.device @@ -668,21 +686,9 @@ def _copy_weight(module, weight): expert_dim = self.intermediate_dim with init_empty_weights(): - gate_proj = nn.ModuleList( - [ - nn.Linear(self.hidden_dim, expert_dim, bias=False) - for _ in range(self.num_experts) - ] - ) - up_proj = nn.ModuleList( - [ - nn.Linear(self.hidden_dim, expert_dim, bias=False) - for _ in range(self.num_experts) - ] - ) - down_proj = nn.ModuleList( + expert_modules = nn.ModuleList( [ - nn.Linear(expert_dim, self.hidden_dim, bias=False) + _Qwen3_5MoeExpertModule(self.hidden_dim, expert_dim) for _ in range(self.num_experts) ] ) @@ -690,17 +696,31 @@ def _copy_weight(module, weight): for idx in range(self.num_experts): # gate_up_proj shape: (num_experts, 2*intermediate_dim, hidden_dim) # Already in (out_features, in_features) format, no transpose needed - _copy_weight(gate_proj[idx], self.gate_up_proj[idx, :expert_dim, :]) - _copy_weight(up_proj[idx], self.gate_up_proj[idx, expert_dim:, :]) + _copy_weight(expert_modules[idx].gate_proj, self.gate_up_proj[idx, :expert_dim, :]) + _copy_weight(expert_modules[idx].up_proj, self.gate_up_proj[idx, expert_dim:, :]) # down_proj shape: (num_experts, hidden_dim, intermediate_dim) # Already in (out_features, in_features) format - _copy_weight(down_proj[idx], self.down_proj[idx]) + _copy_weight(expert_modules[idx].down_proj, self.down_proj[idx]) delattr(self, "gate_up_proj") delattr(self, "down_proj") - self.gate_proj = gate_proj - self.up_proj = up_proj - self.down_proj = down_proj + # Register expert modules directly as numbered children (like nn.ModuleList) + # so the naming pattern is: experts.{id}.gate_proj.weight (no extra nesting) + for idx in range(self.num_experts): + self.add_module(str(idx), expert_modules[idx]) + + def __len__(self): + """Support len() so the module is iterable like standard MoE experts.""" + return self.num_experts + + def __iter__(self): + """Support iteration over expert modules.""" + for idx in range(self.num_experts): + yield getattr(self, str(idx)) + + def __getitem__(self, idx): + """Support indexing to get individual expert modules.""" + return getattr(self, str(int(idx))) def forward( self, @@ -720,10 +740,11 @@ def forward( with torch.no_grad(): top_k_pos, token_idx = torch.where(expert_mask[expert_idx]) current_state = hidden_states[token_idx] - gate = self.gate_proj[expert_idx](current_state) - up = self.up_proj[expert_idx](current_state) + expert = self[expert_idx] + gate = expert.gate_proj(current_state) + up = expert.up_proj(current_state) current_hidden_states = self.act_fn(gate) * up - current_hidden_states = self.down_proj[expert_idx](current_hidden_states) + current_hidden_states = expert.down_proj(current_hidden_states) current_hidden_states = ( current_hidden_states * top_k_weights[token_idx, top_k_pos, None] ) From af5153917472fc925eb64f7f299f79dd3aa52b0a Mon Sep 17 00:00:00 2001 From: Zhiyu Cheng Date: Mon, 16 Feb 2026 17:19:04 -0800 Subject: [PATCH 6/7] adopt *experts.{id}.* naming pattern Signed-off-by: Zhiyu Cheng --- modelopt/torch/export/unified_export_hf.py | 49 ++++++++++++------- .../torch/quantization/plugins/huggingface.py | 12 ++--- 2 files changed, 38 insertions(+), 23 deletions(-) diff --git a/modelopt/torch/export/unified_export_hf.py b/modelopt/torch/export/unified_export_hf.py index dd069cacf..f61649d13 100644 --- a/modelopt/torch/export/unified_export_hf.py +++ b/modelopt/torch/export/unified_export_hf.py @@ -959,6 +959,36 @@ 
def _export_diffusers_checkpoint( print(f"Export complete. Saved to: {export_dir}") +def _revert_weight_conversion_noop(model: Any, state_dict: dict) -> dict: + """No-op replacement for transformers' revert_weight_conversion.""" + return state_dict + + +def _patch_revert_weight_conversion() -> list[tuple[Any, Any]]: + """Patch revert_weight_conversion in transformers to avoid IndexError on scalar tensors.""" + import importlib + + patches: list[tuple[Any, Any]] = [] + for mod_path in [ + "transformers.core_model_loading", + "transformers.modeling_utils", + ]: + try: + mod = importlib.import_module(mod_path) + if hasattr(mod, "revert_weight_conversion"): + patches.append((mod, getattr(mod, "revert_weight_conversion"))) + setattr(mod, "revert_weight_conversion", _revert_weight_conversion_noop) + except (ImportError, AttributeError): + pass + return patches + + +def _unpatch_revert_weight_conversion(patches: list[tuple[Any, Any]]) -> None: + """Restore the original revert_weight_conversion functions.""" + for mod, original in patches: + mod.revert_weight_conversion = original + + def export_hf_checkpoint( model: Any, dtype: torch.dtype | None = None, @@ -1022,21 +1052,7 @@ def export_hf_checkpoint( # quantized state dicts (scalar scale tensors have 0 dimensions, causing IndexError). # We must patch both the source module and the importing module since # modeling_utils does `from core_model_loading import revert_weight_conversion`. - _patches = [] - _noop = lambda model, state_dict: state_dict - for _mod_path in [ - "transformers.core_model_loading", - "transformers.modeling_utils", - ]: - try: - import importlib - - _mod = importlib.import_module(_mod_path) - if hasattr(_mod, "revert_weight_conversion"): - _patches.append((_mod, getattr(_mod, "revert_weight_conversion"))) - setattr(_mod, "revert_weight_conversion", _noop) - except (ImportError, AttributeError): - pass + _patches = _patch_revert_weight_conversion() try: model.save_pretrained( @@ -1045,8 +1061,7 @@ def export_hf_checkpoint( save_modelopt_state=save_modelopt_state, ) finally: - for _mod, _original in _patches: - _mod.revert_weight_conversion = _original + _unpatch_revert_weight_conversion(_patches) original_config = f"{export_dir}/config.json" config_data = {} diff --git a/modelopt/torch/quantization/plugins/huggingface.py b/modelopt/torch/quantization/plugins/huggingface.py index 94e8a67ec..5b49c93fe 100644 --- a/modelopt/torch/quantization/plugins/huggingface.py +++ b/modelopt/torch/quantization/plugins/huggingface.py @@ -653,7 +653,7 @@ def forward( return next_states -class _Qwen3_5MoeExpertModule(nn.Module): +class _Qwen35MoeExpertModule(nn.Module): """Container for a single Qwen3.5 MoE expert's linear layers. Produces the naming pattern: experts.{id}.gate_proj.weight @@ -667,7 +667,7 @@ def __init__(self, hidden_dim: int, expert_dim: int): self.down_proj = nn.Linear(expert_dim, hidden_dim, bias=False) -class _QuantQwen3_5MoeExperts(QuantModule): +class _QuantQwen35MoeExperts(QuantModule): def _setup(self): """Modify the Qwen3_5MoeExperts by using per-expert nn.Module containers. 
@@ -688,7 +688,7 @@ def _copy_weight(module, weight): with init_empty_weights(): expert_modules = nn.ModuleList( [ - _Qwen3_5MoeExpertModule(self.hidden_dim, expert_dim) + _Qwen35MoeExpertModule(self.hidden_dim, expert_dim) for _ in range(self.num_experts) ] ) @@ -898,7 +898,7 @@ def unpack_weight(self): pass -class _QuantQwen3_5MoeSparseMoeBlock(_QuantSparseMoe): +class _QuantQwen35MoeSparseMoeBlock(_QuantSparseMoe): """Qwen3.5 MoE stores top_k/num_experts in the router (self.gate), not as direct attributes. We override forward instead of just bridging attributes because the router (self.gate) @@ -927,12 +927,12 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if Qwen3_5MoeSparseMoeBlock not in QuantModuleRegistry: QuantModuleRegistry.register({Qwen3_5MoeSparseMoeBlock: "hf.Qwen3_5MoeSparseMoeBlock"})( - _QuantQwen3_5MoeSparseMoeBlock + _QuantQwen35MoeSparseMoeBlock ) if Qwen3_5MoeExperts not in QuantModuleRegistry: QuantModuleRegistry.register({Qwen3_5MoeExperts: "hf.Qwen3_5MoeExperts"})( - _QuantQwen3_5MoeExperts + _QuantQwen35MoeExperts ) except ImportError: pass From 4219853136f6000f0c63b5b5f8a89e7c0f758bb2 Mon Sep 17 00:00:00 2001 From: Zhiyu Cheng Date: Mon, 16 Feb 2026 17:20:21 -0800 Subject: [PATCH 7/7] adopt *experts.{id}.* naming pattern Signed-off-by: Zhiyu Cheng --- modelopt/torch/export/unified_export_hf.py | 27 ++++++++++++++-------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/modelopt/torch/export/unified_export_hf.py b/modelopt/torch/export/unified_export_hf.py index f61649d13..3235c5d2f 100644 --- a/modelopt/torch/export/unified_export_hf.py +++ b/modelopt/torch/export/unified_export_hf.py @@ -964,22 +964,31 @@ def _revert_weight_conversion_noop(model: Any, state_dict: dict) -> dict: return state_dict -def _patch_revert_weight_conversion() -> list[tuple[Any, Any]]: - """Patch revert_weight_conversion in transformers to avoid IndexError on scalar tensors.""" +def _try_patch_module(mod_path: str) -> tuple[Any, Any] | None: + """Try to patch revert_weight_conversion in a single module.""" import importlib + try: + mod = importlib.import_module(mod_path) + if hasattr(mod, "revert_weight_conversion"): + original = getattr(mod, "revert_weight_conversion") + setattr(mod, "revert_weight_conversion", _revert_weight_conversion_noop) + return (mod, original) + except (ImportError, AttributeError): + pass + return None + + +def _patch_revert_weight_conversion() -> list[tuple[Any, Any]]: + """Patch revert_weight_conversion in transformers to avoid IndexError on scalar tensors.""" patches: list[tuple[Any, Any]] = [] for mod_path in [ "transformers.core_model_loading", "transformers.modeling_utils", ]: - try: - mod = importlib.import_module(mod_path) - if hasattr(mod, "revert_weight_conversion"): - patches.append((mod, getattr(mod, "revert_weight_conversion"))) - setattr(mod, "revert_weight_conversion", _revert_weight_conversion_noop) - except (ImportError, AttributeError): - pass + result = _try_patch_module(mod_path) + if result is not None: + patches.append(result) return patches
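
Usage sketch for the path these patches enable, under stated assumptions: the checkpoint id and calibration prompts below are hypothetical placeholders, and mtq.FP8_DEFAULT_CFG is chosen only for illustration; examples/llm_ptq/hf_ptq.py remains the reference entry point.

    # Minimal PTQ + export sketch for the Qwen3.5 MoE support added above.
    # Assumptions: the checkpoint id is hypothetical and the FP8 default config
    # is illustrative; the series itself does not pin a quantization format.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    import modelopt.torch.quantization as mtq
    from modelopt.torch.export import export_hf_checkpoint

    model_id = "Qwen/Qwen3.5-MoE-example"  # hypothetical checkpoint id
    model = AutoModelForCausalLM.from_pretrained(
        model_id, torch_dtype=torch.bfloat16, device_map="auto"
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    def forward_loop(m):
        # During calibration, the registered _QuantQwen35MoeSparseMoeBlock
        # temporarily raises gate.top_k to num_experts so every expert's
        # quantizers observe data and receive a valid amax.
        for prompt in ["Hello, world.", "Mixture-of-experts calibration sample."]:
            inputs = tokenizer(prompt, return_tensors="pt").to(m.device)
            m(**inputs)

    mtq.quantize(model, mtq.FP8_DEFAULT_CFG, forward_loop)

    # export_hf_checkpoint wraps save_pretrained with the revert_weight_conversion
    # patch introduced in this series, then writes the unified HF checkpoint with
    # the experts.{id}.gate_proj/up_proj/down_proj naming pattern.
    export_hf_checkpoint(model, export_dir="qwen3_5_moe_fp8")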