# -*- coding: utf-8 -*-
"""Node2Vec Rec.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1vfCAWs55pvWv4WZOId7xfApJUOVkXLCj
"""

# __future__ imports must precede every other statement in the file.
from __future__ import division

import pickle

import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

from google.colab import drive

drive.mount('/content/gdrive')
def loadEmbeddings(filename):
    """Read embeddings in word2vec text format into a dict of node id -> vector."""
    model = {}
    with open(filename) as f:
        for line in f:
            splitLine = line.split()
            word = splitLine[0]
            embedding = np.array([float(val) for val in splitLine[1:]])
            model[word] = embedding
    return model
# Node2vec recommendation
model = loadEmbeddings('/content/gdrive/My Drive/8th_Sem_Project PES_293_323_355/node2vec.embeddings')

for key in model:
    model[key] = list(model[key])

# The first entry read from the embeddings file is the word2vec-style header line,
# not a user embedding; drop it before clustering.
vals = list(model.values())
del vals[0]

# Cluster the user embeddings into 5 groups with K-Means.
df = pd.DataFrame(vals)
kmeans = KMeans(n_clusters=5, random_state=0).fit(df)
labels = kmeans.labels_

# Drop the header entry (key '14346') from the model as well, then assign each
# remaining user its cluster label; dict insertion order matches the order of vals.
del model['14346']
user_cluster_number = {}
for index, key in enumerate(model.keys()):
    user_cluster_number[key] = labels[index]

# Invert the mapping: cluster label -> list of user ids in that cluster.
user_per_cluster_dict = {n: [k for k in user_cluster_number.keys() if user_cluster_number[k] == n]
                         for n in set(user_cluster_number.values())}

# f[user]['high'] is the set of items the user rated highly (from user_high_low_info);
# top_users holds the users considered for evaluation.
f = pickle.load(open('/content/gdrive/My Drive/8th_Sem_Project PES_293_323_355/user_high_low_info.pickle', 'rb'))
top_users = pickle.load(open('/content/gdrive/My Drive/8th_Sem_Project PES_293_323_355/top_users.pickle', 'rb'))
top_users = list(top_users.keys())
count = 0
accuracy = 0
error = 0
final_df = pd.DataFrame(columns = ["User", "Recommended set length", "Actual recommended set length", "Common recommendations", "Common recommendation length" ,"Coverage", "Overall Coverage", "Hit Rate"])
# Evaluate: for every top user with more than 20 highly rated items, recommend the
# union of the 'high' sets of the 10 nearest users in the same embedding cluster.
for user in top_users:
    if len(f[user]['high']) > 20:
        count += 1
        sample_user = user
        embedding_sample_user = model[sample_user]

        # Find the sample user's cluster and gather all users assigned to it.
        cluster_sample_user = kmeans.predict([embedding_sample_user])
        cluster_entries = user_per_cluster_dict[cluster_sample_user[0]]

        # Euclidean distance from the sample user to every user in the same cluster.
        distance_from_sample_user = {}
        for cluster_user in cluster_entries:
            dist = np.linalg.norm(np.array(model[sample_user]) - np.array(model[cluster_user]))
            distance_from_sample_user[cluster_user] = dist

        # Sort by distance and keep the 10 nearest neighbours (index 0 is the user itself).
        distance_from_sample_user_sorted = sorted(distance_from_sample_user.items(), key=lambda kv: kv[1])
        distance_from_sample_user_sorted = distance_from_sample_user_sorted[1:11]

        # The recommendation set is the union of the neighbours' highly rated items.
        recommended_set = set()
        for i in range(0, 10):
            influential_user = distance_from_sample_user_sorted[i][0]
            recommended_set |= f[influential_user]['high']
        # Compare the recommendations against the user's own highly rated items.
        common = recommended_set & f[sample_user]['high']

        row = {}
        row["User"] = sample_user
        row["Recommended set length"] = len(recommended_set)
        row["Actual recommended set length"] = len(f[sample_user]['high'])
        row["Common recommendations"] = common
        row["Common recommendation length"] = len(common)

        # Coverage: fraction of the user's highly rated items that were recommended.
        row["Coverage"] = len(common) / len(f[sample_user]['high'])

        # Overall Coverage: running mean of Coverage over the users evaluated so far.
        accuracy += len(common) / len(f[sample_user]['high'])
        row["Overall Coverage"] = accuracy / count

        # Hit Rate: running sum of per-user precision (guard against an empty recommendation set).
        try:
            error += len(common) / len(recommended_set)
            row["Hit Rate"] = error
        except ZeroDivisionError:
            row["Hit Rate"] = 0

        # DataFrame.append is deprecated in recent pandas; use concat instead.
        final_df = pd.concat([final_df, pd.DataFrame([row])], ignore_index=True)
print(count)
final_df  # in the notebook this cell displayed the evaluation table
pickle.dump(final_df, open("/content/gdrive/My Drive/8th_Sem_Project PES_293_323_355/node2vec_output.pickle","wb"))