forked from YXNan0110/RANA
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathFINAL.py
More file actions
205 lines (172 loc) · 8.1 KB
/
FINAL.py
File metadata and controls
205 lines (172 loc) · 8.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
import numpy as np
import time
from sklearn.preprocessing import normalize
# from evaluation.matcher import top_k, greedy_match
from metrics import get_statistics
from numpy import inf, nan
from graph_utils import get_H, normalize_matrix
import graph_utils
import argparse
import os
from dataset import Dataset
from network_alignment_model import NetworkAlignmentModel
from abc import ABCMeta
class FINAL(NetworkAlignmentModel):
    """
    FINAL attributed-network alignment.
    Input:
        A1, A2: Input adjacency matrices with n1, n2 nodes
        N1, N2: Node attributes matrices, N1 is an n1*K matrix, N2 is an n2*K
            matrix, each row is a node, and each column represents an
            attribute. If the input node attributes are categorical, we can
            use one hot encoding to represent each node feature as a vector.
            And the input N1 and N2 are still n1*K and n2*K matrices.
            E.g., for node attributes as countries, including USA, China, Canada,
            if a user is from China, then his node feature is (0, 1, 0).
            If N1 and N2 are empty, i.e., N1 = [], N2 = [], then no node
            attributes are input.
        E1, E2: an L*1 cell, where E1{i} is the n1*n1 matrix whose nonzero entries
            are the i-th attribute of edges. E2{i} is the same. Similarly, if the
            input edge attributes are categorical, we can use one hot
            encoding, i.e., E1{i}(a,b)=1 if edge (a,b) has categorical
            attribute i. If E1 and E2 are empty, i.e., E1 = {} or [], E2 = {}
            or [], then no edge attributes are input.
        H: an n2*n1 prior node similarity matrix, e.g., degree similarity. H
            should be normalized, e.g., sum(sum(H)) = 1.
        alpha: decay factor
        maxiter, tol: maximum number of iterations and difference tolerance.
    Output:
        S: an n2*n1 alignment matrix, entry (x,y) represents to what extent
            node-x in A2 is aligned to node-y in A1
    """
    def __init__(self, source_dataset, target_dataset, H=None, alpha=0.82, maxiter=30, tol=1e-4, train_dict=None):
        """
        Args:
            source_dataset, target_dataset: Dataset objects providing the
                adjacency matrix, node features, and (optional) edge features.
            H: prior n2*n1 similarity matrix; when None it is built by get_H
                from the datasets (and train_dict, if supervision is given).
            alpha: decay factor of the fixed-point iteration.
            maxiter: maximum number of fixed-point iterations.
            tol: convergence tolerance on the L2 change of the solution vector.
            train_dict: optional supervision mapping forwarded to get_H.
        """
        self.source_dataset = source_dataset
        self.target_dataset = target_dataset
        self.A1 = source_dataset.get_adjacency_matrix()
        self.A2 = target_dataset.get_adjacency_matrix()
        self.alpha = alpha
        self.maxiter = maxiter
        self.H = get_H(H, source_dataset, target_dataset, train_dict)
        self.tol = tol
        self.N1 = source_dataset.features
        self.N2 = target_dataset.features
        self.E1 = source_dataset.load_edge_features()
        self.E2 = target_dataset.load_edge_features()
        self.alignment_matrix = None
        # If no node attributes are given, initialize each node with a constant
        # attribute of 1 so that all nodes are treated as having the same
        # attribute, which is equivalent to having no node attributes at all.
        if self.N1 is None and self.N2 is None:
            self.N1 = np.ones((self.A1.shape[0], 1))
            self.N2 = np.ones((self.A2.shape[0], 1))
        # If no edge attributes are given, fall back to a single edge-feature
        # channel that is the adjacency matrix itself; E* has shape (L, n, n).
        self.no_edge_feat = False
        if self.E1 is None and self.E2 is None:
            self.E1 = np.zeros((1, self.A1.shape[0], self.A1.shape[1]))
            self.E2 = np.zeros((1, self.A2.shape[0], self.A2.shape[1]))
            self.E1[0] = self.A1
            self.E2[0] = self.A2
            self.no_edge_feat = True
    def align(self):
        """Run the FINAL fixed-point iteration.

        Returns:
            (alignment_matrix, normalized_S): the n1*n2 alignment matrix and
            its row-normalized version (via normalize_matrix).
        """
        L = self.E1.shape[0]  # number of edge-attribute channels
        K = self.N1.shape[1]  # number of node-attribute channels
        n1 = self.A1.shape[0]
        n2 = self.A2.shape[0]
        if not self.no_edge_feat:
            # Normalize edge feature vectors: divide each channel by the
            # per-edge sum of squared attributes (where nonzero).
            T1 = np.zeros_like(self.A1)
            T2 = np.zeros_like(self.A2)
            for i in range(L):
                T1 += self.E1[i] ** 2
                T2 += self.E2[i] ** 2
            # Vectorized reciprocal on nonzero entries (replaces the original
            # O(n^2) Python double loops; same element-wise semantics).
            mask1 = T1 > 0
            T1[mask1] = 1. / T1[mask1]
            mask2 = T2 > 0
            T2[mask2] = 1. / T2[mask2]
            for i in range(L):
                self.E1[i] = self.E1[i] * T1
                self.E2[i] = self.E2[i] * T2
        # Row-wise L2-normalize node feature vectors
        self.N1 = normalize(self.N1)
        self.N2 = normalize(self.N2)
        # Node-feature cosine cross-similarity as a Kronecker vector of
        # length n1*n2.
        N = np.zeros(n1 * n2)
        for k in range(K):
            N += np.kron(self.N1[:, k], self.N2[:, k])
        # Attributed adjacencies are loop-invariant: precompute them once and
        # reuse both for the Kronecker degree vector and in the iteration.
        W1 = [self.E1[i] * self.A1 for i in range(L)]
        W2 = [self.E2[i] * self.A2 for i in range(L)]
        # Compute the Kronecker degree vector
        d = np.zeros_like(N)
        for i in range(L):
            for k in range(K):
                d += np.kron(np.dot(W1[i], self.N1[:, k]), np.dot(W2[i], self.N2[:, k]))
        D = N * d
        # D^(-1/2) with zeros kept at zero (inf from the full-array divide is
        # cleaned up immediately after).
        DD = np.where(D == 0, 0, 1. / np.sqrt(D))
        DD[DD == inf] = 0
        # Fixed-point solution: iterate s <- (1-alpha)*h + alpha*q*(W2 M W1)
        q = DD * N
        h = self.H.flatten('F')
        s = h
        for i in range(self.maxiter):
            prev = s
            M = (q * s).reshape((n2, n1), order='F')
            S = np.zeros((n2, n1))
            for l in range(L):
                S += np.dot(np.dot(W2[l], M), W1[l])
            s = (1 - self.alpha) * h + self.alpha * q * S.flatten('F')
            diff = np.sqrt(np.sum((s - prev) ** 2))
            # BUG FIX: the original condition was inverted
            # (`if diff < tol: continue` then `break`), which terminated the
            # loop on the first NON-converged iteration — i.e., after a single
            # pass. Converged means stop.
            if diff < self.tol:
                break
        self.alignment_matrix = s.reshape((n1, n2))
        normalized_S = normalize_matrix(self.alignment_matrix)
        return self.alignment_matrix, normalized_S
    def get_alignment_matrix(self):
        """Return the alignment matrix computed by align(); raise if align()
        has not been called yet."""
        if self.alignment_matrix is None:
            raise Exception("Must calculate alignment matrix by calling 'align()' method first")
        return self.alignment_matrix
def parse_args(argv=None):
    """Parse command-line options for the FINAL runner.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] (backward compatible); passing
            an explicit list makes the function testable and reusable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="FINAL")
    parser.add_argument('--prefix1', default="graph_data/douban/offline/graphsage/")
    parser.add_argument('--prefix2', default="graph_data/douban/online/graphsage/")
    parser.add_argument('--dataset', default='douban')
    parser.add_argument('--groundtruth', default="graph_data/douban/dictionaries/")
    parser.add_argument('--H', default=None)
    parser.add_argument('--max_iter', default=30, type=int)
    parser.add_argument('--alpha', default=0.82, type=float)
    parser.add_argument('--tol', default=1e-4, type=float)
    parser.add_argument('--k', default=1, type=int)
    parser.add_argument('--rate', default=0.01, type=float)
    return parser.parse_args(argv)
if __name__ == "__main__":
args = parse_args()
print(args)
source_dataset = Dataset(args.prefix1)
target_dataset = Dataset(args.prefix2)
'''
douban dataset has some problems!!!
data in dictionaries have reverse to use!!!
'''
groundtruth = graph_utils.load_gt(os.path.join(args.groundtruth, f"groundtruth"), source_dataset.id2idx, target_dataset.id2idx, 'dict', args.dataset)
train_dict = graph_utils.load_gt(os.path.join(args.groundtruth, f"node,split={args.rate}.train.dict"),
source_dataset.id2idx, target_dataset.id2idx, 'dict', args.dataset)
model = FINAL(source_dataset, target_dataset, None, args.alpha, args.max_iter, args.tol, train_dict)
S = model.align()
print(S)
print("-"*100)
acc, MAP, top5, top10, S_pairs = get_statistics(S, groundtruth, use_greedy_match=True, get_all_metric=True)
print("Accuracy: {:.4f}".format(acc))
print("MAP: {:.4f}".format(MAP))
print("Precision_5: {:.4f}".format(top5))
print("Precision_10: {:.4f}".format(top10))
print(S_pairs) # Alignment metric and 1 represent align