#
# Copyright (C) 2025 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
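"""Minimal LLMart data pipeline example.

Defines a one-example Hugging Face dataset builder and a DataMapper that
converts prompt/completion pairs into tokenized inputs with loss labels.
"""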
import torch
import datasets
from transformers.tokenization_utils_base import BatchEncoding

from llmart import DataMapper


class BasicBuilder(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo()

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name="train")]

    def _generate_examples(self, **kwargs):
        example = dict(
            prompt="Tell me about the planet Saturn.", completion="NO WAY JOSE"
        )
        yield 0, example

class BasicMapper(DataMapper):
    def __call__(self, batch):
        # Build one conversation per example, noting that inputs arrive batched
        convs = [
            [
                dict(role="user", content=self.modify_prompt(prompt)),
                dict(role="assistant", content=self.force_completion(completion)),
            ]
            for prompt, completion in zip(batch["prompt"], batch["completion"])
        ]
        # Turn conversations into input_ids and masks.
        # NOTE: One could use llmart.ConversationMapper or return bare conversations.
        inputs = self.tokenizer.apply_chat_template(
            convs,
            padding=True,
            return_tensors="pt",
            return_dict=True,
        )
        assert isinstance(inputs, BatchEncoding)

        # Construct labels from response_mask; -100 marks positions that
        # PyTorch's cross-entropy loss ignores, so only response tokens
        # contribute to the loss.
        input_ids = inputs["input_ids"]
        assert isinstance(input_ids, torch.Tensor)
        labels = input_ids.detach().clone()
        response_mask = inputs["response_mask"]
        assert isinstance(response_mask, torch.Tensor)
        labels[~response_mask] = -100
        inputs["labels"] = labels

        return inputs.data
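

# --- Usage sketch (not part of the original file) ---
# A minimal example of driving BasicBuilder through the standard Hugging Face
# `datasets` builder API. BasicMapper is deliberately not instantiated here:
# its constructor is defined by llmart's DataMapper and is not shown in this
# file, so any signature would be an assumption.
if __name__ == "__main__":
    builder = BasicBuilder()
    builder.download_and_prepare()
    train = builder.as_dataset(split="train")
    # Expected: {'prompt': 'Tell me about the planet Saturn.', 'completion': 'NO WAY JOSE'}
    print(train[0])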