-
Notifications
You must be signed in to change notification settings - Fork 0
/
coRetweet.py
65 lines (49 loc) · 2.66 KB
/
coRetweet.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import pandas as pd
import numpy as np
import networkx as nx
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfTransformer
from scipy.sparse import csr_matrix
from pandas.api.types import CategoricalDtype
# Data assumptions:
# - 2 Pandas dataframes
# - control: control dataset -> includes only columns ['user', 'retweeted_status', 'id']
# - treated: information-operation (IO) dataset -> includes only columns ['tweetid', 'userid', 'retweet_tweetid']
def coRetweet(control, treated, min_tweets=20):
    """Build a co-retweet similarity network over sufficiently active users.

    Users (from both datasets) who retweeted at least ``min_tweets`` times are
    kept; tweets retweeted by more than one such user define a user x tweet
    incidence matrix, which is TF-IDF weighted and compared with cosine
    similarity. The result is an undirected weighted graph whose nodes are
    user ids (as strings) and whose edge weights are the pairwise similarity.

    Parameters
    ----------
    control : pandas.DataFrame
        Columns ['user', 'retweeted_status', 'id']; 'user' and
        'retweeted_status' are dict-like with an 'id' key.
    treated : pandas.DataFrame
        Columns ['tweetid', 'userid', 'retweet_tweetid'].
    min_tweets : int, optional
        Minimum number of retweets a user needs to be included (default 20,
        matching the original hard-coded threshold).

    Returns
    -------
    networkx.Graph
        Similarity graph with isolated users removed. Self-loops are excluded
        (the similarity diagonal is zeroed before graph construction).
    """
    # Work on copies: the original mutated the callers' dataframes in place
    # (dropna(inplace=True) plus column assignment), which is a side effect
    # callers rarely expect.
    control = control.dropna()
    treated = treated.dropna()

    # Normalize the control dataset to the treated schema:
    # ['tweetid', 'userid', 'retweet_tweetid'].
    control = pd.DataFrame({
        'tweetid': control['id'],
        'userid': control['user'].apply(lambda u: int(dict(u)['id'])),
        'retweet_tweetid': control['retweeted_status'].apply(lambda r: int(dict(r)['id'])),
    })
    treated = treated.copy()
    treated['retweet_tweetid'] = treated['retweet_tweetid'].astype(int)

    cum = pd.concat([treated, control])

    # Keep only users with at least `min_tweets` retweets across both sets.
    counts = cum.groupby('userid', as_index=False)['tweetid'].count()
    active = set(counts.loc[counts['tweetid'] >= min_tweets, 'userid'])
    cum = cum.loc[cum['userid'].isin(active)]

    # One row per (user, retweeted tweet); keep only tweets shared by >1 user,
    # since a tweet retweeted by a single user contributes no co-occurrence.
    cum = cum[['userid', 'retweet_tweetid']].drop_duplicates()
    shared = cum.groupby('retweet_tweetid', as_index=False).count()
    shared_ids = shared.loc[shared['userid'] > 1, 'retweet_tweetid']
    cum = cum.loc[cum['retweet_tweetid'].isin(shared_ids)]

    # factorize() replaces the hand-rolled id->code dicts: codes are assigned
    # in order of first appearance, exactly like the original mapping.
    row, user_labels = pd.factorize(cum['userid'].astype(str))
    col, tweet_labels = pd.factorize(cum['retweet_tweetid'])
    incidence = csr_matrix(
        (np.ones(len(cum)), (row, col)),
        shape=(len(user_labels), len(tweet_labels)),
    )

    # TF-IDF down-weights tweets retweeted by many users, then cosine
    # similarity scores each user pair.
    tfidf_matrix = TfidfTransformer().fit_transform(incidence)
    similarities = cosine_similarity(tfidf_matrix, dense_output=False)

    adj = similarities.toarray()
    # Bug fix: the diagonal (self-similarity) is always 1, which gave every
    # node a self-loop — so the isolate removal below could never fire.
    # Zeroing the diagonal restores the intended behavior.
    np.fill_diagonal(adj, 0.0)

    df_adj = pd.DataFrame(adj, index=user_labels, columns=user_labels)
    G = nx.from_pandas_adjacency(df_adj)
    G.remove_nodes_from(list(nx.isolates(G)))
    return G