-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path: PreProcessing.py
More file actions
207 lines (171 loc) · 7.17 KB
/
PreProcessing.py
File metadata and controls
207 lines (171 loc) · 7.17 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
# save as data/preprocess_squad.py
import json
import os
import pandas as pd
import numpy as np
from datasets import load_from_disk, Dataset
import torch
from transformers import BertTokenizer, AutoTokenizer
def preprocess_squad(model_type="bert"):
    """
    Preprocess the SQuAD dataset for a given model family.

    Args:
        model_type: Type of model ("bert", "gpt", "lstm").

    Returns:
        Tuple of (train_processed, val_processed) datasets.

    Raises:
        ValueError: If model_type is not one of the supported values.
    """
    # Prefer local JSON dumps; fall back to downloading from the Hub.
    try:
        with open('data/raw/squad_train.json', 'r') as f:
            train_data = json.load(f)
        with open('data/raw/squad_validation.json', 'r') as f:
            val_data = json.load(f)
        train_dataset = Dataset.from_dict(train_data)
        val_dataset = Dataset.from_dict(val_data)
    except (OSError, ValueError, KeyError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid unrelated bugs.
        # OSError covers missing/unreadable files, ValueError covers
        # json.JSONDecodeError, KeyError covers malformed dict payloads.
        from datasets import load_dataset
        dataset = load_dataset('rajpurkar/squad')
        train_dataset = dataset['train']
        val_dataset = dataset['validation']
    # Ensure the per-model output directory exists before any save.
    os.makedirs(f'data/processed/{model_type}', exist_ok=True)
    # Dispatch to the model-specific preprocessing routine.
    if model_type == "bert":
        return preprocess_for_bert(train_dataset, val_dataset)
    elif model_type == "gpt":
        return preprocess_for_gpt(train_dataset, val_dataset)
    elif model_type == "lstm":
        return preprocess_for_lstm(train_dataset, val_dataset)
    else:
        raise ValueError(f"Unknown model_type: {model_type}")
def preprocess_for_bert(train_dataset, val_dataset):
    """Preprocess SQuAD for BERT extractive QA (start/end token labels).

    Tokenizes (question, context) pairs with a sliding window and maps the
    character-level answer span onto token start/end positions. Features
    whose window does not contain the answer are labeled with index 0
    (the [CLS] token), the conventional "no answer here" target.

    Returns:
        Tuple of (train_processed, val_processed) tokenized datasets,
        also saved under data/processed/bert/.
    """
    # Must be a *fast* tokenizer: the pure-Python BertTokenizer raises
    # NotImplementedError for return_offsets_mapping=True. AutoTokenizer
    # returns the fast variant by default.
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    max_length = 384  # single source of truth for the window size

    def preprocess_function(examples):
        questions = [q.strip() for q in examples["question"]]
        contexts = [c.strip() for c in examples["context"]]
        inputs = tokenizer(
            questions,
            contexts,
            max_length=max_length,
            truncation="only_second",
            stride=128,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            padding="max_length",
        )
        offset_mapping = inputs.pop("offset_mapping")
        sample_map = inputs.pop("overflow_to_sample_mapping")
        start_positions = []
        end_positions = []
        for i, offsets in enumerate(offset_mapping):
            sample_idx = sample_map[i]
            answer = examples["answers"][sample_idx]
            # sequence_ids distinguishes question tokens (0), context
            # tokens (1) and special/padding tokens (None). The previous
            # scan over raw offsets could land inside the question, and
            # padding offsets of (0, 0) made it run to the end of the row.
            sequence_ids = inputs.sequence_ids(i)

            # Unanswerable example: point both labels at [CLS].
            if len(answer["answer_start"]) == 0:
                start_positions.append(0)
                end_positions.append(0)
                continue

            start_char = answer["answer_start"][0]
            end_char = start_char + len(answer["text"][0])

            # Locate the context span within this feature.
            context_start = 0
            while sequence_ids[context_start] != 1:
                context_start += 1
            context_end = len(sequence_ids) - 1
            while sequence_ids[context_end] != 1:
                context_end -= 1

            # Answer not fully inside this (possibly overflowed) window:
            # label the feature as unanswerable instead of mislabeling it.
            if (offsets[context_start][0] > start_char
                    or offsets[context_end][1] < end_char):
                start_positions.append(0)
                end_positions.append(0)
                continue

            # Advance to the first context token covering start_char.
            idx = context_start
            while idx <= context_end and offsets[idx][0] <= start_char:
                idx += 1
            start_positions.append(idx - 1)

            # Walk back to the last context token covering end_char.
            idx = context_end
            while idx >= context_start and offsets[idx][1] >= end_char:
                idx -= 1
            end_positions.append(idx + 1)

        inputs["start_positions"] = start_positions
        inputs["end_positions"] = end_positions
        return inputs

    # Tokenization changes row count (overflow windows), so the original
    # columns must be dropped.
    train_processed = train_dataset.map(
        preprocess_function,
        batched=True,
        remove_columns=train_dataset.column_names,
    )
    val_processed = val_dataset.map(
        preprocess_function,
        batched=True,
        remove_columns=val_dataset.column_names,
    )
    # Save processed datasets
    train_processed.save_to_disk('data/processed/bert/train')
    val_processed.save_to_disk('data/processed/bert/validation')
    print(f"BERT preprocessing complete. Examples: {len(train_processed)}")
    return train_processed, val_processed
def preprocess_for_gpt(train_dataset, val_dataset):
    """Preprocess SQuAD for GPT-style causal LM fine-tuning.

    Each example is flattened into a single prompt string
    "Context: ... Question: ... Answer: ..." and tokenized to a fixed
    length of 512.

    Returns:
        Tuple of (train_processed, val_processed) tokenized datasets,
        also saved under data/processed/gpt/.
    """
    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    # GPT-2 ships without a pad token; reuse EOS so padding works.
    tokenizer.pad_token = tokenizer.eos_token

    def preprocess_function(examples):
        # Format for GPT: "Context: {context} Question: {question} Answer:"
        texts = [
            f"Context: {context} Question: {question} Answer: {answer['text'][0] if len(answer['text']) > 0 else 'No answer'}"
            for context, question, answer in zip(examples["context"], examples["question"], examples["answers"])
        ]
        # NOTE: no return_tensors here — Dataset.map serializes the output
        # to Arrow as plain lists, so the previous per-batch torch-tensor
        # conversion was discarded work.
        # NOTE(review): no "labels" column is produced; presumably the
        # training code derives labels from input_ids — confirm downstream.
        encodings = tokenizer(
            texts,
            truncation=True,
            max_length=512,
            padding="max_length",
        )
        return encodings

    # Apply preprocessing (original columns dropped; only token fields remain).
    train_processed = train_dataset.map(
        preprocess_function,
        batched=True,
        remove_columns=train_dataset.column_names,
    )
    val_processed = val_dataset.map(
        preprocess_function,
        batched=True,
        remove_columns=val_dataset.column_names,
    )
    # Save processed datasets
    train_processed.save_to_disk('data/processed/gpt/train')
    val_processed.save_to_disk('data/processed/gpt/validation')
    print(f"GPT preprocessing complete. Examples: {len(train_processed)}")
    return train_processed, val_processed
def preprocess_for_lstm(train_dataset, val_dataset):
    """Preprocess SQuAD into flat text columns for LSTM-based models.

    Keeps raw context/question strings plus the first answer text and its
    character offset; vocabulary building and index conversion are left to
    the model code. Results are saved as CSVs under data/processed/lstm/.

    Returns:
        Tuple of (train_processed, val_processed) datasets.
    """
    def preprocess_function(examples):
        # Extract flat features; -1 / "" mark unanswerable examples.
        features = {
            "context": examples["context"],
            "question": examples["question"],
            "answer_text": [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in examples["answers"]],
            "answer_start": [answer["answer_start"][0] if len(answer["answer_start"]) > 0 else -1 for answer in examples["answers"]]
        }
        return features

    # Apply preprocessing
    train_processed = train_dataset.map(
        preprocess_function,
        batched=True,
        remove_columns=["id", "title"],
    )
    val_processed = val_dataset.map(
        preprocess_function,
        batched=True,
        remove_columns=["id", "title"],
    )
    # Create the output directory here as well: previously only
    # preprocess_squad() made it, so calling this function directly
    # crashed with FileNotFoundError on the CSV write.
    os.makedirs('data/processed/lstm', exist_ok=True)
    train_processed.to_pandas().to_csv('data/processed/lstm/train.csv', index=False)
    val_processed.to_pandas().to_csv('data/processed/lstm/validation.csv', index=False)
    print(f"LSTM preprocessing complete. Examples: {len(train_processed)}")
    return train_processed, val_processed
if __name__ == "__main__":
    # Run every preprocessing pipeline back to back.
    for mt in ("bert", "gpt", "lstm"):
        print(f"\nPreprocessing for {mt}...")
        preprocess_squad(mt)