-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpreprocess.py
More file actions
34 lines (30 loc) · 1.16 KB
/
preprocess.py
File metadata and controls
34 lines (30 loc) · 1.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import gensim
import sctokenizer
# Function to open a file, put it in readable format, tokenize it, and tag each line
def process_corpus(fname, tokens_only=False):
    """Tokenize a C source file and yield one document per occupied source line.

    Parameters
    ----------
    fname : str
        Path to the C source file to tokenize.
    tokens_only : bool, optional
        When True, yield plain lists of token values (e.g. for inference).
        When False (default), yield gensim TaggedDocument objects tagged
        with the source line number, suitable for Doc2Vec training.

    Yields
    ------
    list or gensim.models.doc2vec.TaggedDocument
        The token values of one occupied source line, in token order.
    """
    print("Tokenizing file")
    tokens = sctokenizer.tokenize_file(fname, lang='c')
    print("File tokenized")
    # Group token values by their source line in a single pass.
    # setdefault creates the per-line list on first sight of a line,
    # replacing the original two-pass build (one pass to create keys,
    # one to populate them).
    line_tokens = {}
    for token in tokens:
        line_tokens.setdefault(token.line, []).append(token.token_value)
    # Dicts preserve insertion order (Python 3.7+), so lines are yielded
    # in the order their first token appeared — same as the original.
    for line_no, values in line_tokens.items():
        if tokens_only:
            yield values
        else:
            # For training data, tag each document with its line number
            yield gensim.models.doc2vec.TaggedDocument(values, [line_no])