Skip to content

Commit

Permalink
Merge branch 'main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
lazishu2000 committed Feb 6, 2024
2 parents b384af7 + 8abf8fa commit dc20078
Show file tree
Hide file tree
Showing 71 changed files with 18,067 additions and 109 deletions.
344 changes: 344 additions & 0 deletions openhgnn/config.ini
Original file line number Diff line number Diff line change
@@ -1,3 +1,96 @@
[DisenKGAT]
# str
name = Disen_Model
# data = DisenKGAT_WN18RR
# model = DisenKGAT
score_func = interacte
opn = cross
# gpu = 2
logdir = ./log/
config = ./config/
strategy = one_to_n
form = plain
mi_method = club_b
att_mode = dot_weight
score_method = dot_rel
score_order = after
gamma_method = norm


# int
k_w = 10
batch = 2048
test_batch = 2048
epoch = 1500
num_workers = 10
seed = 41504
init_dim = 100
gcn_dim = 200
embed_dim = 200
gcn_layer = 1
k_h = 20
num_filt = 200
ker_sz = 7
num_bases = -1
neg_num = 1000
ik_w = 10
ik_h = 20
inum_filt = 200
iker_sz = 9
iperm = 1
head_num = 1
num_factors = 3
early_stop = 200
mi_epoch = 1

# float
feat_drop = 0.3
hid_drop2 = 0.3
hid_drop = 0.3
gcn_drop = 0.4
gamma = 9.0
l2 = 0.0
lr = 0.001
lbl_smooth = 0.1
iinp_drop = 0.3
ifeat_drop = 0.4
ihid_drop = 0.3
alpha = 1e-1
max_gamma = 5.0
init_gamma = 9.0

# boolean
restore = False
bias = False
no_act = False
mi_train = True
no_enc = False
mi_drop = True
fix_gamma = False





[NBF]
input_dim = 32
hidden_dims = [32, 32, 32, 32, 32, 32]
message_func = distmult
aggregate_func = pna
short_cut = True
layer_norm = True
dependent = False
num_negative = 32
strict_negative = True
adversarial_temperature = 1
metric = ['mr', 'mrr', 'hits@1', 'hits@3', 'hits@10', 'hits@10_50']
lr = 0.005
gpus = [0]
batch_size = 64
num_epoch = 20
log_interval = 100


[General]
learning_rate = 0.01
weight_decay = 0.0001
Expand Down Expand Up @@ -887,3 +980,254 @@ embedding_size = 64
num_layers = 3
test_u_batch_size = 100
topks = 20

[Grail]
num_epochs = 100
eval_every = 3
eval_every_iter = 455
save_every = 10
early_stop = 100
optimizer = Adam
lr = 0.01
clip = 1000
l2 = 5e-4
margin = 10
max_links = 1000000
hop = 3
max_nodes_per_hop = 0
use_kge_embeddings = False
kge_model = TransE
model_type = dgl
constrained_neg_prob = 0.0
batch_size = 16
num_neg_samples_per_link = 1
num_workers = 8
add_traspose_rels = False
enclosing_sub_graph = True
rel_emb_dim = 32
attn_rel_emb_dim = 32
emb_dim = 32
num_gcn_layers = 3
num_bases = 4
dropout = 0
edge_dropout = 0.5
gnn_agg_type = sum
add_ht_emb = True
has_attn = True
mode = sample

[ComPILE]
num_epochs = 100
eval_every = 3
eval_every_iter = 455
save_every = 10
early_stop = 100
optimizer = Adam
lr = 0.01
clip = 1000
l2 = 5e-4
margin = 10
max_links = 1000000
hop = 3
max_nodes_per_hop = 0
use_kge_embeddings = False
kge_model = TransE
model_type = dgl
constrained_neg_prob = 0.0
batch_size = 16
num_neg_samples_per_link = 1
num_workers = 8
add_traspose_rels = False
enclosing_sub_graph = True
rel_emb_dim = 32
attn_rel_emb_dim = 32
emb_dim = 32
num_gcn_layers = 3
num_bases = 4
dropout = 0
edge_dropout = 0.5
gnn_agg_type = sum
add_ht_emb = True
has_attn = True
mode = sample


[AdapropT]
data_path = data/family/
layers = 8
sampling = incremental
act = relu
weight = None
train = True
remove_1hop_edges = False
scheduler = exp
fact_ratio = 0.9
epoch = 300
eval_interval = 1
topk = 100
lr = 0.0036
decay_rate = 0.999
lamb = 0.000017
hidden_dim = 48
attn_dim = 5
dropout = 0.29
n_edge_topk = -1
n_layer = 8
n_batch = 20
n_node_topk = 800
seed = 1234
n_tbatch = 20
eval = False

[AdapropI]
data_path = ./data/fb237_v1
seed = 1234

[LTE]
model_name_GCN = LTE_Transe
model_name = LTE
name = lte
data = FB15k-237
score_func = transe
opn = mult
hid_drop = 0.2
gpu = 0
x_ops = p
n_layer = 0
init_dim = 200
batch_size = 64
epoch = 300
l2 = 0.0
lr = 0.001
lbl_smooth = 0.1
num_workers = 8
seed = 12345
restore = False
bias = False
num_bases = -1
gcn_dim = 200
gcn_drop = 0.1
conve_hid_drop = 0.3
feat_drop = 0.2
input_drop = 0.2
k_w = 20
k_h = 10
num_filt = 200
ker_sz = 7
gamma = 9.0
rat = False
wni = False
wsi = False
ss = False
nobn = False
noltr = False
encoder = compgcn
max_epochs = 500

[SACN]
seed = 12345
init_emb_size = 200
gc1_emb_size = 150
embedding_dim = 200
input_dropout = 0
dropout_rate = 0.2
channels = 200
kernel_size = 5
gpu = 5
lr = 0.002
n_epochs = 300
num_workers = 2
eval_every = 1
dataset_data = FB15k-237
batch_size = 64
patience = 100
decoder = transe
gamma = 9.0
name = repro
n_layer = 1
rat = False
wsi = False
wni = False
ss = -1
final_act = True
final_bn = False
final_drop = False

[ExpressGNN]
seed = 10
embedding_size = 128
gcn_free_size = 127
slice_dim = 16
no_train = 0
filtered = filtered
hidden_dim = 64
num_epochs = 100
batchsize = 16
trans = 0
num_hops = 2
num_mlp_layers = 2
num_batches = 100
learning_rate = 0.0005
lr_decay_factor = 0.5
lr_decay_patience = 100
lr_decay_min = 0.00001
patience = 20
l2_coef = 0.0
observed_prob = 0.9
entropy_temp = 1
no_entropy = 0
rule_weights_learning = 1
learning_rate_rule_weights = 0.001
epoch_mode = 0
shuffle_sampling = 1
load_method = 1
load_s = 1
use_gcn = 1
filter_latent = 0
closed_world = 0

[Ingram]
margin = 2
lr = 5e-4
nle = 2
nlr = 2
d_e = 32
d_r = 32
hdr_e = 8
hdr_r = 4
num_bin = 10
num_epoch = 10000
validation_epoch = 200
num_head = 8
num_neg = 10

[RedGNN]
seed = 0
patience = 3
batch_size = 100
hidden_dim = 64
optimizer = Adam
lr = 0.005
weight_decay = 0.0002
max_epoch = 50
decay_rate = 0.991
attn_dim = 5
dropout = 0.21
act = idd
n_layer = 5

[RedGNNT]
seed = 0
patience = 3
batch_size = 20
hidden_dim = 48
optimizer = Adam
lr = 0.0036
weight_decay = 0.000017
max_epoch = 50
decay_rate = 0.999
attn_dim = 5
dropout = 0.21
act = relu
n_layer = 3
n_tbatch = 50
Loading

0 comments on commit dc20078

Please sign in to comment.