utils.py
from typing import Any, Dict, List, Optional, Union
import os
import json
import re
import string
from collections import Counter


def load_long_text(file_path: str, use_loogle: Optional[int] = None) -> str:
    """
    Load long text from various file formats.

    Args:
        file_path: Path to the text file (.txt, .md, .rst, .json, .jsonl,
            or plain text)
        use_loogle: If given, treat the file as LooGLE-format JSONL and
            return the 'input' field of the record at this index

    Returns:
        Text content as a string
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"Long text file not found: {file_path}")

    _, ext = os.path.splitext(file_path)

    # LooGLE-format JSONL: one JSON object per line, document text under 'input'
    if use_loogle is not None:
        with open(file_path, 'r', encoding='utf-8') as f:
            data = [json.loads(line) for line in f]
        text = data[use_loogle]['input']
        return text.strip()

    try:
        if ext in ['.txt', '.md', '.rst']:
            with open(file_path, 'r', encoding='utf-8') as f:
                text = f.read()
        elif ext == '.json':
            with open(file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            if isinstance(data, dict):
                text = data.get('text', '') or data.get('content', '') or str(data)
            elif isinstance(data, list):
                text = '\n\n'.join([
                    item.get('text', '') or item.get('content', '') or str(item)
                    for item in data
                ])
            else:
                text = str(data)
        elif ext == '.jsonl':
            with open(file_path, 'r', encoding='utf-8') as f:
                lines = [json.loads(line) for line in f]
            text = '\n\n'.join([
                line.get('text', '') or line.get('content', '') or str(line)
                for line in lines
            ])
        else:
            # Default: treat as plain text
            with open(file_path, 'r', encoding='utf-8') as f:
                text = f.read()
        return text.strip()
    except Exception as e:
        raise RuntimeError(f"Failed to load long text from {file_path}: {e}")
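

# A minimal sketch of load_long_text on a LooGLE-style JSONL file; the record
# and temp file below are throwaway fixtures, not part of the repo's data:
def _demo_load_long_text():
    import tempfile
    record = {"input": "Full article text goes here. ", "title": "demo"}
    with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False,
                                     encoding="utf-8") as f:
        f.write(json.dumps(record) + "\n")
        path = f.name
    print(load_long_text(path, use_loogle=0))  # -> "Full article text goes here."
    os.unlink(path)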


def find_sentence_bounds(text, chunk_text):
    """
    Find the start and end positions of chunk_text in text, and expand both
    ends to the closest sentence boundary. Sentences are assumed to end with
    typical punctuation: '.', '?', '!', '。', '？', '！'
    """
    # Use chunk_text as an anchor; for robustness, find all of its
    # occurrences in the full text and use the first one
    starts = [m.start() for m in re.finditer(re.escape(chunk_text), text)]
    if not starts:
        return 0, len(chunk_text)
    start_char = starts[0]
    end_char = start_char + len(chunk_text)

    # Sentence-ending punctuation (ASCII and full-width CJK forms)
    sentence_endings = r'[.!?。！？]'

    # Scan left to the nearest sentence end before the chunk
    left_text = text[:start_char]
    left_match = list(re.finditer(sentence_endings, left_text))
    left = left_match[-1].end() if left_match else 0

    # Scan right to the nearest sentence end after the chunk
    right_text = text[end_char:]
    right_match = re.search(sentence_endings, right_text)
    right = end_char + right_match.end() if right_match else len(text)

    return left, right
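

# A minimal sketch of how find_sentence_bounds behaves (the example string and
# spans are illustrative assumptions, not from the repo): given a fragment cut
# mid-sentence on both sides, the returned span grows to cover whole sentences.
def _demo_find_sentence_bounds():
    text = "First sentence here. Second one follows! Third closes it."
    fragment = "one follows! Third"
    left, right = find_sentence_bounds(text, fragment)
    print(text[left:right])  # -> " Second one follows! Third closes it."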


def chunk_long_text(
    text: str,
    chunk_size: Union[int, List[int]] = 2048,
    overlap: int = 256,
    tokenizer=None,
) -> List[Dict[str, Any]]:
    """
    Split long text into overlapping chunks for processing.

    Args:
        text: The long text to be chunked
        chunk_size: Maximum number of tokens per chunk; a list of sizes
            chunks the text once per size
        overlap: Number of overlapping tokens between consecutive chunks
        tokenizer: Tokenizer used to count tokens (required)

    Returns:
        List of dictionaries containing chunk info:
            - 'text': chunk text, expanded to sentence boundaries
            - 'start_idx': start character position in the original text
            - 'end_idx': end character position in the original text
            - 'chunk_id': sequential chunk identifier
    """
    chunks = []
    chunk_sizes = [chunk_size] if isinstance(chunk_size, int) else chunk_size

    # Use the tokenizer for accurate, token-based chunking
    tokens = tokenizer.encode(text)
    chunk_id = 0
    for size in chunk_sizes:
        if overlap >= size:
            raise ValueError(f"overlap ({overlap}) must be smaller than chunk size ({size})")
        start_token = 0
        while start_token < len(tokens):
            end_token = min(start_token + size, len(tokens))
            chunk_tokens = tokens[start_token:end_token]
            chunk_text = tokenizer.decode(chunk_tokens, skip_special_tokens=True)
            # Snap the decoded chunk back onto the original text, expanded to
            # sentence boundaries, so a chunk never cuts a sentence in half
            left, right = find_sentence_bounds(text, chunk_text)
            chunk_text = text[left:right]
            chunks.append({
                'text': chunk_text,
                'start_idx': left,
                'end_idx': right,
                'chunk_id': chunk_id,
            })
            chunk_id += 1
            if end_token >= len(tokens):
                break  # final chunk emitted; don't produce a duplicate tail
            start_token += size - overlap

    print(f"[INFO] Chunked long text into {len(chunks)} chunks")
    return chunks
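

# Quick sketch of chunk_long_text with a whitespace "tokenizer" stand-in (an
# assumption for illustration; the real entry point below uses a Hugging Face
# tokenizer, which exposes the same encode/decode pair):
class _WhitespaceTokenizer:
    def encode(self, text):
        return text.split()

    def decode(self, tokens, skip_special_tokens=True):
        return " ".join(tokens)


def _demo_chunk_long_text():
    text = " ".join(f"Sentence number {i} ends here." for i in range(40))
    demo_chunks = chunk_long_text(text, chunk_size=40, overlap=10,
                                  tokenizer=_WhitespaceTokenizer())
    for c in demo_chunks[:2]:
        print(c['chunk_id'], c['start_idx'], c['end_idx'])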


def extract_qa_pair(text: str) -> Dict[str, Any]:
    """
    Extract a question and answer from model-generated text.

    Expected format: <question>...</question><answer>...</answer>

    Args:
        text: Model-generated text containing question and answer tags

    Returns:
        Dictionary with 'question', 'answer', and 'extraction_success' keys.
        If the tags are not found, the text is split in half as a fallback
        and 'extraction_success' is False.
    """
    # Try to extract the question
    question_match = re.search(r'<question>(.*?)</question>', text, re.DOTALL | re.IGNORECASE)
    question = question_match.group(1).strip() if question_match else None

    # Try to extract the answer
    answer_match = re.search(r'<answer>(.*?)</answer>', text, re.DOTALL | re.IGNORECASE)
    answer = answer_match.group(1).strip() if answer_match else None

    if question and answer:
        return {
            'question': question,
            'answer': answer,
            'extraction_success': True,
        }
    # If the tags are not found, infer from structure by splitting the text
    # in half; this is a fallback for robustness
    return {
        'question': question if question else text[:len(text) // 2].strip(),
        'answer': answer if answer else text[len(text) // 2:].strip(),
        'extraction_success': False,
    }
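

# Example of extract_qa_pair on a well-formed and a malformed generation
# (the sample strings are illustrative assumptions):
def _demo_extract_qa_pair():
    good = "<question>Who wrote Hamlet?</question><answer>Shakespeare</answer>"
    print(extract_qa_pair(good))
    # -> {'question': 'Who wrote Hamlet?', 'answer': 'Shakespeare', 'extraction_success': True}
    bad = "Who wrote Hamlet? Shakespeare"
    print(extract_qa_pair(bad)['extraction_success'])  # -> False (half-split fallback)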


def normalize_answer(s):
    """Lowercase, strip punctuation and articles, and collapse whitespace
    (the standard SQuAD answer normalization)."""
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction: str, ground_truth: str) -> float:
    """Token-level F1 between a predicted and a gold answer (SQuAD-style)."""
    normalized_prediction = normalize_answer(prediction)
    normalized_ground_truth = normalize_answer(ground_truth)
    prediction_tokens = normalized_prediction.split()
    ground_truth_tokens = normalized_ground_truth.split()

    # Multiset overlap between prediction and ground-truth tokens
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
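

# Worked example of the SQuAD-style metrics above (the strings are
# illustrative assumptions; the arithmetic is easy to verify by hand):
def _demo_f1_score():
    pred, gold = "the quick brown fox", "a quick fox"
    # normalize_answer drops articles: pred -> "quick brown fox", gold -> "quick fox"
    # overlap = {quick, fox} -> num_same = 2
    # precision = 2/3, recall = 2/2 = 1.0, F1 = 2*(2/3*1)/(2/3+1) = 0.8
    print(f1_score(pred, gold))  # -> 0.8 (up to float rounding)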


if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("/ceph/home/muhan01/huggingfacemodels/Qwen2.5-7B-Instruct")
    text = load_long_text("data/squad_loogle_format.jsonl", use_loogle=0)
    chunks = chunk_long_text(text, chunk_size=2048, overlap=256, tokenizer=tokenizer)
    print(chunks)