diff --git a/Jenkinsfile b/Jenkinsfile
index 8b1054343535..233f475f532b 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -217,11 +217,11 @@ pipeline {
         //    unit_test_linux("pytorch", "cpu")
         //  }
         //}
-        //stage("Tutorial test") {
-        //  steps {
-        //    tutorial_test_linux("mxnet")
-        //  }
-        //}
+        stage("Tutorial test") {
+          steps {
+            tutorial_test_linux("mxnet")
+          }
+        }
       }
     }
     stage("MXNet GPU") {
@@ -243,11 +243,11 @@ pipeline {
         //    unit_test_linux("pytorch", "cpu")
         //  }
         //}
-        //stage("Tutorial test") {
-        //  steps {
-        //    tutorial_test_linux("mxnet")
-        //  }
-        //}
+        stage("Tutorial test") {
+          steps {
+            tutorial_test_linux("mxnet")
+          }
+        }
       }
     }
   }
diff --git a/tutorials/models/5_giant_graph/1_sampling_mx.py b/tutorials/models/5_giant_graph/1_sampling_mx.py
index 39b3834abbc4..1e8484cb5738 100644
--- a/tutorials/models/5_giant_graph/1_sampling_mx.py
+++ b/tutorials/models/5_giant_graph/1_sampling_mx.py
@@ -250,9 +250,9 @@ def forward(self, nf):
 # dropout probability
 dropout = 0.2
 # batch size
-batch_size = 10000
+batch_size = 1000
 # number of neighbors to sample
-num_neighbors = 8
+num_neighbors = 4
 # number of epochs
 num_epochs = 1
@@ -267,6 +267,7 @@ def forward(self, nf):
                         {'learning_rate': 0.03, 'wd': 0})
 
 for epoch in range(num_epochs):
+    i = 0
     for nf in dgl.contrib.sampling.NeighborSampler(g, batch_size,
                                                    num_neighbors,
                                                    neighbor_type='in',
@@ -291,6 +292,10 @@ def forward(self, nf):
         # optimization
         trainer.step(batch_size=1)
         print("Epoch[{}]: loss {}".format(epoch, loss.asscalar()))
+        i += 1
+        # We only train the model with 32 mini-batches just for demonstration.
+        if i >= 32:
+            break
 
 ##############################################################################
 # Control Variate