-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdatasets.py
More file actions
244 lines (216 loc) · 9.91 KB
/
datasets.py
File metadata and controls
244 lines (216 loc) · 9.91 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
import pandas as pd
import random
from sklearn.model_selection import train_test_split
from utils.prompt import TASK_INST_TRAIN, TASK_INST_EVAL, TASK_INST_EVAL_UAR
class Blank(object):
    def __init__(self, description, content):
        """
        Represents a blank object. It is two situation: 1.crosscontext 2.blank object for signal

        :param description: The description of object.
        :param content: The content of object.
        """
        self.description = description
        self.content = content

    def __str__(self):
        # Leading newline, then description and content on their own lines.
        return "\n".join(("", self.description, self.content))
class Example(object):
    def __init__(self, task_id, question, answer, crossfile_context):
        """
        Represents an example used for constructing a dataset.

        :param task_id: Task ID.
        :param question: The question from a pair of qa.
        :param answer: The answer from a pair of qa.
        :param crossfile_context: Relative context of the question.
            (The item of crossfile_context is a dict, and it's structure is
            {id: xxx, title: xxx, text: xxx})
        """
        self.task_id = task_id
        self.question = question
        self.answer = answer
        self.crossfile_context = crossfile_context

    def __str__(self):
        # Human-readable dump; crossfile_context is deliberately omitted
        # because it can be very large.
        header = "[Example]:\n"
        body = (
            f"[Task ID]:\n{self.task_id}\n"
            f"[Question]:\n{self.question}\n"
            f"[Answer]:\n{self.answer}\n"
        )
        return header + body
def load_test_dataset(args, datasetname):
    """
    Loads a dataset for evaluation and wraps each row in an Example.

    :param args: Parameters containing various configurations; only
        ``args.debug`` is read here (when truthy, the data is down-sampled
        to 100 rows for quick runs).
    :param datasetname: The name of the dataset to load. One of:
        'drop', 'gsm8k', 'triviaqa', 'wq', 'taqa', 'freshqa'.
    :return: The loaded dataset as a list of Example objects (answers are
        left empty; they are produced during evaluation).
    :raises ValueError: If ``datasetname`` is not a supported dataset
        (previously this fell through to a confusing NameError).
    """
    # Map each dataset name to (path, is_jsonl) so the load step is written once.
    data_files = {
        'drop': ("eval_data/drop_dataset_dev_passage_qa_with_ret.json", False),
        'gsm8k': ("eval_data/gsm8k_test_with_ret.json", False),
        'triviaqa': ("eval_data/triviaqa_with_ret.json", False),
        'wq': ("eval_data/wq_with_ret.json", False),
        'taqa': ("eval_data/taqa_with_ret.jsonl", True),
        'freshqa': ("eval_data/freshqa_without_falsepremise_time_change_with_ret.jsonl", True),
    }
    if datasetname not in data_files:
        raise ValueError(
            f"Unsupported dataset {datasetname!r}; expected one of {sorted(data_files)}"
        )
    path, is_jsonl = data_files[datasetname]
    data_frame = pd.read_json(path, lines=is_jsonl)

    if args.debug:
        # Quick-debug mode: keep only 100 random rows.
        # NOTE(review): no random_state is passed, so debug runs are
        # non-deterministic — confirm this is intended before seeding.
        data_frame = data_frame.sample(100)

    dataset = []
    if datasetname == 'drop':
        for _, row in data_frame.iterrows():
            # DROP prompts fold the passage into the question template.
            question = TASK_INST_EVAL_UAR['drop'](row['passage'], row['question'])
            dataset.append(
                Example(task_id=row['query_id'],
                        question=question,
                        answer='',
                        crossfile_context=row['cotriever_results'])
            )
    elif datasetname == 'gsm8k':
        for index, row in data_frame.iterrows():
            question = TASK_INST_EVAL_UAR['gsm8k'](row['question'])
            dataset.append(
                Example(task_id=f"gsm8k_{index}",
                        question=question,
                        answer='',
                        crossfile_context=row['cotriever_results'])
            )
    elif datasetname == 'triviaqa':
        for _, row in data_frame.iterrows():
            dataset.append(
                Example(task_id=row['question_id'],
                        question=row['question'],
                        answer='',
                        crossfile_context=row['cotriever_results'])
            )
    elif datasetname == 'wq':
        for index, row in data_frame.iterrows():
            dataset.append(
                Example(task_id=f"wq_{index}",
                        question=row['question'],
                        answer='',
                        crossfile_context=row['cotriever_results'])
            )
    elif datasetname == 'taqa':
        for _, row in data_frame.iterrows():
            # taqa ships a single reference passage rather than retriever results.
            dataset.append(
                Example(task_id=row['question_id'],
                        question=row['question'],
                        answer='',
                        crossfile_context=[{'text': row['refer_passage']}])
            )
    elif datasetname == 'freshqa':
        for index, row in data_frame.iterrows():
            dataset.append(
                Example(task_id=f"freshqa_{index}",
                        question=row['question'],
                        answer='',
                        crossfile_context=[{'text': row['refer_passage']}])
            )
    return dataset
def load_train_and_valid_dataset(validation_split=0.2, random_seed=42):
    """
    Loads the training dataset and splits it into train/validation partitions.

    :param validation_split: Fraction of rows held out for validation
        (default 0.2, i.e. an 80/20 split).
    :param random_seed: Seed for the shuffle, making the split reproducible.
    :return: A ``(training_datasets, validation_datasets)`` tuple of
        pandas DataFrames.
    """
    # Load the data. (The previous dead list initializations were removed:
    # train_test_split fully defines both return values.)
    data_frame = pd.read_json("data/alpaca.jsonl", lines=True)
    training_datasets, validation_datasets = train_test_split(
        data_frame,
        test_size=validation_split,
        random_state=random_seed,
        shuffle=True,  # shuffle before splitting for an unbiased partition
    )
    return training_datasets, validation_datasets
def construct_dataset(raw_data, num_samples):
    """
    Builds a dataset of Example objects by cycling through ``raw_data``.

    :param raw_data: Raw data (DataFrame-like, indexable via ``.iloc``) with
        columns 'task_id', 'left_context', 'groundtruth', 'crossfile_context'.
    :param num_samples: The number of samples to generate; rows are reused
        round-robin when it exceeds ``len(raw_data)``. A value <= 0 yields
        an empty list.
    :return: The list of constructed samples.
    :raises ValueError: If ``raw_data`` is empty but samples were requested
        (previously this crashed with ZeroDivisionError on the modulo).
    """
    if len(raw_data) == 0:
        if num_samples > 0:
            raise ValueError("raw_data is empty; cannot construct any samples")
        return []
    examples = []
    data_index = 0
    while len(examples) < num_samples:
        # Fetch the next item, wrapping around when the data is exhausted.
        entry = raw_data.iloc[data_index % len(raw_data)]
        data_index += 1
        # Pull the relevant columns out of the row.
        task_id = entry['task_id']
        question = entry['left_context']
        answer = entry['groundtruth']
        crossfile_context = entry['crossfile_context']
        # Create a new example object.
        examples.append(
            Example(task_id=task_id,                     # the dataset provides a task_id column
                    question=question,                   # left_context serves as the question
                    answer=answer,                       # ground truth is used as the answer
                    crossfile_context=crossfile_context) # top-100 docs from first-stage retrieval
        )
    return examples