convert_qwen_gptq_to_awq.py
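"""Convert a GPTQ-quantized Qwen checkpoint into the AWQ-style format used here.

Reads config.json, qwen.tiktoken, and the quantized weight files from the
input directory, and writes model_config.json, tokenizer_config.json, and
repacked .safetensors files to the output directory.

The entry point is exposed through python-fire, so a typical invocation
(paths below are placeholders) would look like:

    python convert_qwen_gptq_to_awq.py path/to/qwen-gptq path/to/output

Pass --weight_is_awq=True when the quantized tensors are already AWQ-packed
and only the renaming plus config/tokenizer conversion is needed.
"""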
import json
from pathlib import Path

import fire
import torch
from safetensors.torch import load_file, save_file

from convert_utils import pack_u4, unpack_u4

def convert_config(input: Path, output: Path):
    config_json = input / "config.json"
    assert config_json.exists()
    config = json.loads(config_json.read_text())
    assert "QWenLMHeadModel" in config["architectures"]

    quantize_config_json = input / "quantize_config.json"
    if quantize_config_json.exists():
        quantize_config = json.loads(quantize_config_json.read_text())
        assert quantize_config["bits"] == 4
        assert quantize_config["desc_act"] == False

    new_config = dict(
        hidden_size=config["hidden_size"],
        # Qwen's config reports the combined w1+w2 width; halve it to get
        # the per-branch FFN size.
        inner_hidden_size=config["intermediate_size"] // 2,
        head_hidden_size=config["kv_channels"],
        hidden_act="silu",
        num_attention_heads=config["num_attention_heads"],
        num_key_value_heads=config["num_attention_heads"],
        num_layers=config["num_hidden_layers"],
        qkv_bias=True,
        o_bias=False,
        vocab_size=config["vocab_size"],
        dropout_rate=0.0,
        layernorm_epsilon=1e-6,
        max_sequence_length=2048,
    )
    model_config_json = output / "model_config.json"
    model_config_json.write_text(json.dumps(new_config, indent=2))
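
# For reference, a hypothetical run on Qwen-7B (hidden_size=4096,
# intermediate_size=22016, kv_channels=128, 32 heads, 32 layers,
# vocab_size=151936 in its published config.json) would emit roughly:
#
# {
#   "hidden_size": 4096,
#   "inner_hidden_size": 11008,
#   "head_hidden_size": 128,
#   "hidden_act": "silu",
#   "num_attention_heads": 32,
#   "num_key_value_heads": 32,
#   "num_layers": 32,
#   ...
# }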

def convert_tokenizer(input: Path, output: Path):
    tiktoken_file = input / "qwen.tiktoken"
    assert tiktoken_file.exists()
    # Each non-empty line is "<base64-encoded token> <rank>".
    lines = tiktoken_file.read_text().splitlines()
    pairs = [line.split(" ") for line in lines if line]
    ranks = {k.strip(): int(v) for k, v in pairs}

    eos_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>"]
    special_tokens = eos_tokens + [
        f"<|extra_{i}|>" for i in range(205)
    ]
    tokenizer_config = dict(
        eos_tokens=eos_tokens,
        # Special tokens get IDs immediately after the base BPE ranks.
        special_tokens={
            k: v + len(ranks) for v, k in enumerate(special_tokens)
        },
        ranks=ranks,
        pattern=r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""",
    )
    tokenizer_config_json = output / "tokenizer_config.json"
    tokenizer_config_json.write_text(
        json.dumps(tokenizer_config, indent=2, ensure_ascii=False),
        encoding="utf-8",
    )
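
# Resulting ID layout (assuming Qwen's stock qwen.tiktoken, which has 151,643
# BPE ranks): the base vocabulary occupies IDs 0..151642, then
# <|endoftext|> -> 151643, <|im_start|> -> 151644, <|im_end|> -> 151645,
# followed by <|extra_0|> .. <|extra_204|>.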

def convert_weights(input: Path, output: Path, weight_is_awq: bool):
    name_mapping = {
        'transformer.wte.weight': 'word_embedding.weight',
        'transformer.ln_f.weight': 'final_ln.weight',
        'lm_head.weight': 'lm_head.weight',
    }
    # 128 is just an upper bound on the layer count; extra entries are unused.
    for i in range(128):
        name_mapping.update({
            f'transformer.h.{i}.ln_1.weight': f'layers.{i}.attn_ln.weight',
            f'transformer.h.{i}.attn.c_attn.bias': f'layers.{i}.attn.qkv_proj.bias',
            f'transformer.h.{i}.ln_2.weight': f'layers.{i}.ffn_ln.weight',
        })
        for suffix in ["weight", "qweight", "qzeros", "scales"]:
            name_mapping.update({
                f'transformer.h.{i}.attn.c_attn.{suffix}': f'layers.{i}.attn.qkv_proj.{suffix}',
                f'transformer.h.{i}.attn.c_proj.{suffix}': f'layers.{i}.attn.o_proj.{suffix}',
                f'transformer.h.{i}.mlp.w1.{suffix}': f'layers.{i}.ffn.w_in.{suffix}',
                f'transformer.h.{i}.mlp.w2.{suffix}': f'layers.{i}.ffn.w_gate.{suffix}',
                f'transformer.h.{i}.mlp.c_proj.{suffix}': f'layers.{i}.ffn.w_out.{suffix}',
            })

    weight_files = list(input.glob("*.safetensors"))
    is_safetensors = True
    if len(weight_files) == 0:
        weight_files = list(input.glob("*.bin"))
        is_safetensors = False
    assert len(weight_files) > 0

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # AWQ's nibble order within each packed int32.
    pack_order = [0, 2, 4, 6, 1, 3, 5, 7]
    for weight_file in weight_files:
        if is_safetensors:
            weights = load_file(weight_file, device=device)
        else:
            weights = torch.load(weight_file, map_location=device)
        converted = {}
        for key, value in weights.items():
            if not weight_is_awq:
                if key.endswith(".qweight"):
                    # GPTQ packs along in_dim, AWQ along out_dim:
                    # [in_dim // 8, out_dim] => [in_dim, out_dim // 8]
                    value = pack_u4(unpack_u4(value.T).T, pack_order)
                elif key.endswith(".qzeros"):
                    # Same shape [in_dim // group_size, out_dim // 8]; GPTQ
                    # stores zero-points minus one, AWQ stores them directly.
                    value = pack_u4(unpack_u4(value) + 1, pack_order)
                elif key.endswith(".g_idx"):
                    # g_idx is trivial with desc_act=False, so drop it.
                    continue
            if key.endswith(".bias") and key not in name_mapping:
                # Only the QKV bias survives the conversion; any other
                # bias must be (near-)zero, within an absolute tolerance.
                if not torch.allclose(value, torch.zeros_like(value), atol=1e-3):
                    raise RuntimeError(f"Non-zero bias in {key}.")
                continue
            converted[name_mapping[key]] = value
        if is_safetensors:
            file_name = weight_file.name
        else:
            file_name = weight_file.stem + ".safetensors"
        save_file(converted, output / file_name)
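
# pack_u4/unpack_u4 live in convert_utils, which is not shown here. Below is a
# minimal sketch of their assumed semantics (hypothetical _sketch_* names,
# assuming the usual GPTQ/AWQ layout of eight 4-bit values per int32):
# unpacking reads nibbles from low to high bits, and packing places element
# order[i] into nibble i, which is how pack_order above turns GPTQ's
# sequential layout into AWQ's interleaved one.
def _sketch_unpack_u4(packed: torch.Tensor) -> torch.Tensor:
    # [..., n] int32 -> [..., n * 8], nibble 0 taken from the low bits.
    shifts = torch.arange(0, 32, 4, device=packed.device, dtype=torch.int32)
    return ((packed.unsqueeze(-1) >> shifts) & 0xF).flatten(-2)


def _sketch_pack_u4(unpacked: torch.Tensor, order: list) -> torch.Tensor:
    # [..., n * 8] -> [..., n] int32, placing element order[i] into nibble i.
    grouped = unpacked.reshape(*unpacked.shape[:-1], -1, 8)[..., order]
    out = torch.zeros(grouped.shape[:-1], dtype=torch.int32, device=grouped.device)
    for i in range(8):
        out |= grouped[..., i].to(torch.int32) << (i * 4)
    return out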

def convert(input, output, weight_is_awq=False):
    input = Path(input)
    output = Path(output)
    assert input.exists()
    if output.exists():
        print(f"Output path {output} already exists.")
    output.mkdir(parents=True, exist_ok=True)
    convert_config(input, output)
    convert_tokenizer(input, output)
    convert_weights(input, output, weight_is_awq)
    print(f"Converted {input} to {output}.")

if __name__ == "__main__":
    fire.Fire(convert)