
[Feature] Add left normalizer for GCN (#3114)
* add left normalizer for gcn

* fix

* fixes and some bug stuff
BarclayII committed Jul 13, 2021
1 parent fac75e1 commit b576e61
Showing 8 changed files with 72 additions and 35 deletions.
2 changes: 2 additions & 0 deletions examples/pytorch/GATNE-T/README.md
@@ -12,6 +12,8 @@ Requirements
 pip install -r requirements.txt
 ```
 
+Also requires PyTorch 1.7.0+.
+
 Datasets
 --------

5 changes: 4 additions & 1 deletion python/dgl/dataloading/pytorch/dataloader.py
@@ -14,7 +14,10 @@
 from ...base import DGLError
 from ...utils import to_dgl_context
 
-__all__ = ['NodeDataLoader', 'EdgeDataLoader', 'GraphDataLoader']
+__all__ = ['NodeDataLoader', 'EdgeDataLoader', 'GraphDataLoader',
+           # Temporary exposure.
+           '_pop_subgraph_storage', '_pop_blocks_storage',
+           '_restore_subgraph_storage', '_restore_blocks_storage']
 
 PYTORCH_VER = LooseVersion(th.__version__)
 PYTORCH_16 = PYTORCH_VER >= LooseVersion("1.6.0")
33 changes: 22 additions & 11 deletions python/dgl/nn/mxnet/conv/graphconv.py
@@ -32,10 +32,18 @@ class GraphConv(gluon.Block):
     out_feats : int
         Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.
     norm : str, optional
-        How to apply the normalizer. If is `'right'`, divide the aggregated messages
-        by each node's in-degrees, which is equivalent to averaging the received messages.
-        If is `'none'`, no normalization is applied. Default is `'both'`,
-        where the :math:`c_{ij}` in the paper is applied.
+        How to apply the normalizer. Can be one of the following values:
+
+        * ``right``, to divide the aggregated messages by each node's in-degrees,
+          which is equivalent to averaging the received messages.
+
+        * ``none``, where no normalization is applied.
+
+        * ``both`` (default), where the messages are scaled with :math:`1/c_{ji}` above,
+          equivalent to symmetric normalization.
+
+        * ``left``, to divide the messages sent out from each node by its out-degrees,
+          equivalent to random walk normalization.
     weight : bool, optional
         If True, apply a linear layer. Otherwise, aggregating the messages
         without a weight matrix.
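
For reference, the four modes correspond to the following aggregation rule for node :math:`i` over its in-neighbors :math:`j` (a sketch inferred from the docstring above, with degrees clamped at a minimum of 1 as in the code; :math:`W` is the optional weight):

```latex
% Aggregation rule per norm mode; d^{in}/d^{out} are the clamped degrees.
h_i^{(l+1)} = \sum_{j \in \mathcal{N}(i)} \frac{1}{c_{ji}} \, h_j^{(l)} W^{(l)},
\qquad
c_{ji} =
\begin{cases}
  1 & \texttt{norm='none'}\\
  d_i^{\mathrm{in}} & \texttt{norm='right'}\\
  \sqrt{d_j^{\mathrm{out}}}\,\sqrt{d_i^{\mathrm{in}}} & \texttt{norm='both'}\\
  d_j^{\mathrm{out}} & \texttt{norm='left'}
\end{cases}
```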
@@ -136,8 +144,8 @@ def __init__(self,
                  activation=None,
                  allow_zero_in_degree=False):
         super(GraphConv, self).__init__()
-        if norm not in ('none', 'both', 'right'):
-            raise DGLError('Invalid norm value. Must be either "none", "both" or "right".'
+        if norm not in ('none', 'both', 'right', 'left'):
+            raise DGLError('Invalid norm value. Must be either "none", "both", "right" or "left".'
                            ' But got "{}".'.format(norm))
         self._in_feats = in_feats
         self._out_feats = out_feats
@@ -230,15 +238,18 @@ def forward(self, graph, feat, weight=None):
                                'suppress the check and let the code run.')
 
             feat_src, feat_dst = expand_as_pair(feat, graph)
-
-            if self._norm == 'both':
-                degs = graph.out_degrees().as_in_context(feat_src.context).astype('float32')
+            if self._norm in ['both', 'left']:
+                degs = graph.out_degrees().as_in_context(feat_dst.context).astype('float32')
                 degs = mx.nd.clip(degs, a_min=1, a_max=float("inf"))
-                norm = mx.nd.power(degs, -0.5)
+                if self._norm == 'both':
+                    norm = mx.nd.power(degs, -0.5)
+                else:
+                    norm = 1.0 / degs
                 shp = norm.shape + (1,) * (feat_src.ndim - 1)
                 norm = norm.reshape(shp)
                 feat_src = feat_src * norm
 
+
             if weight is not None:
                 if self.weight is not None:
                     raise DGLError('External weight is provided while at the same time the'
@@ -264,7 +275,7 @@ def forward(self, graph, feat, weight=None):
             if weight is not None:
                 rst = mx.nd.dot(rst, weight)
 
-            if self._norm != 'none':
+            if self._norm in ['both', 'right']:
                 degs = graph.in_degrees().as_in_context(feat_dst.context).astype('float32')
                 degs = mx.nd.clip(degs, a_min=1, a_max=float("inf"))
                 if self._norm == 'both':
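A minimal usage sketch of the new mode through the Gluon block (a hypothetical toy graph and feature sizes, assuming the MXNet API shown in this file):

```python
# Sketch: 'left' (random-walk) normalization in the MXNet GraphConv.
import dgl
import mxnet as mx
from dgl.nn.mxnet import GraphConv

g = dgl.graph(([0, 0, 1, 2, 3], [1, 2, 2, 3, 0]))  # toy graph, 4 nodes, 5 edges
conv = GraphConv(5, 2, norm='left')                # divide messages by sender out-degree
conv.initialize(ctx=mx.cpu())                      # Gluon blocks need explicit init
feat = mx.nd.ones((4, 5))
out = conv(g, feat)                                # shape (4, 2)
```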
29 changes: 20 additions & 9 deletions python/dgl/nn/pytorch/conv/graphconv.py
@@ -173,10 +173,18 @@ class GraphConv(nn.Module):
     out_feats : int
         Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.
     norm : str, optional
-        How to apply the normalizer. If is `'right'`, divide the aggregated messages
-        by each node's in-degrees, which is equivalent to averaging the received messages.
-        If is `'none'`, no normalization is applied. Default is `'both'`,
-        where the :math:`c_{ji}` in the paper is applied.
+        How to apply the normalizer. Can be one of the following values:
+
+        * ``right``, to divide the aggregated messages by each node's in-degrees,
+          which is equivalent to averaging the received messages.
+
+        * ``none``, where no normalization is applied.
+
+        * ``both`` (default), where the messages are scaled with :math:`1/c_{ji}` above,
+          equivalent to symmetric normalization.
+
+        * ``left``, to divide the messages sent out from each node by its out-degrees,
+          equivalent to random walk normalization.
     weight : bool, optional
         If True, apply a linear layer. Otherwise, aggregating the messages
         without a weight matrix.
@@ -270,8 +278,8 @@ def __init__(self,
                  activation=None,
                  allow_zero_in_degree=False):
         super(GraphConv, self).__init__()
-        if norm not in ('none', 'both', 'right'):
-            raise DGLError('Invalid norm value. Must be either "none", "both" or "right".'
+        if norm not in ('none', 'both', 'right', 'left'):
+            raise DGLError('Invalid norm value. Must be either "none", "both", "right" or "left".'
                            ' But got "{}".'.format(norm))
         self._in_feats = in_feats
         self._out_feats = out_feats
@@ -395,9 +403,12 @@ def forward(self, graph, feat, weight=None, edge_weight=None):
 
             # (BarclayII) For RGCN on heterogeneous graphs we need to support GCN on bipartite.
             feat_src, feat_dst = expand_as_pair(feat, graph)
-            if self._norm == 'both':
+            if self._norm in ['left', 'both']:
                 degs = graph.out_degrees().float().clamp(min=1)
-                norm = th.pow(degs, -0.5)
+                if self._norm == 'both':
+                    norm = th.pow(degs, -0.5)
+                else:
+                    norm = 1.0 / degs
                 shp = norm.shape + (1,) * (feat_src.dim() - 1)
                 norm = th.reshape(norm, shp)
                 feat_src = feat_src * norm
@@ -425,7 +436,7 @@ def forward(self, graph, feat, weight=None, edge_weight=None):
             if weight is not None:
                 rst = th.matmul(rst, weight)
 
-            if self._norm != 'none':
+            if self._norm in ['right', 'both']:
                 degs = graph.in_degrees().float().clamp(min=1)
                 if self._norm == 'both':
                     norm = th.pow(degs, -0.5)
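To see that ``left`` really is random-walk normalization :math:`D_{out}^{-1} A`, a self-check along these lines should hold (a hypothetical snippet, not part of the patch; the toy graph and sizes are made up):

```python
# Sketch: compare GraphConv(norm='left') against a manual D_out^{-1} A reference.
import dgl
import torch as th
from dgl.nn.pytorch import GraphConv

g = dgl.graph(([0, 0, 1, 2, 3], [1, 2, 2, 3, 0]))  # 4 nodes, 5 edges
feat = th.randn(4, 5)

conv = GraphConv(5, 5, norm='left', weight=False, bias=False)
out = conv(g, feat)                                # pure aggregation, no linear layer

src, dst = g.edges()
A = th.zeros(4, 4)
A[dst, src] = 1.0                                  # A[i, j] = 1 iff edge j -> i
deg_out = A.sum(dim=0).clamp(min=1)                # out-degree of each sender j
ref = (A / deg_out) @ feat                         # column j scaled by 1/deg_out[j]
assert th.allclose(out, ref, atol=1e-6)
```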
32 changes: 21 additions & 11 deletions python/dgl/nn/tensorflow/conv/graphconv.py
@@ -34,10 +34,18 @@ class GraphConv(layers.Layer):
     out_feats : int
         Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.
     norm : str, optional
-        How to apply the normalizer. If is `'right'`, divide the aggregated messages
-        by each node's in-degrees, which is equivalent to averaging the received messages.
-        If is `'none'`, no normalization is applied. Default is `'both'`,
-        where the :math:`c_{ij}` in the paper is applied.
+        How to apply the normalizer. Can be one of the following values:
+
+        * ``right``, to divide the aggregated messages by each node's in-degrees,
+          which is equivalent to averaging the received messages.
+
+        * ``none``, where no normalization is applied.
+
+        * ``both`` (default), where the messages are scaled with :math:`1/c_{ji}` above,
+          equivalent to symmetric normalization.
+
+        * ``left``, to divide the messages sent out from each node by its out-degrees,
+          equivalent to random walk normalization.
     weight : bool, optional
         If True, apply a linear layer. Otherwise, aggregating the messages
         without a weight matrix.
@@ -137,8 +145,8 @@ def __init__(self,
                  activation=None,
                  allow_zero_in_degree=False):
         super(GraphConv, self).__init__()
-        if norm not in ('none', 'both', 'right'):
-            raise DGLError('Invalid norm value. Must be either "none", "both" or "right".'
+        if norm not in ('none', 'both', 'right', 'left'):
+            raise DGLError('Invalid norm value. Must be either "none", "both", "right" or "left".'
                            ' But got "{}".'.format(norm))
         self._in_feats = in_feats
         self._out_feats = out_feats
@@ -230,13 +238,15 @@ def call(self, graph, feat, weight=None):
                                'suppress the check and let the code run.')
 
             feat_src, feat_dst = expand_as_pair(feat, graph)
-
-            if self._norm == 'both':
+            if self._norm in ['both', 'left']:
                 degs = tf.clip_by_value(tf.cast(graph.out_degrees(), tf.float32),
                                         clip_value_min=1,
                                         clip_value_max=np.inf)
-                norm = tf.pow(degs, -0.5)
-                shp = norm.shape + (1,) * (feat_src.ndim - 1)
+                if self._norm == 'both':
+                    norm = tf.pow(degs, -0.5)
+                else:
+                    norm = 1.0 / degs
+                shp = norm.shape + (1,) * (feat_dst.ndim - 1)
                 norm = tf.reshape(norm, shp)
                 feat_src = feat_src * norm
 
@@ -265,7 +275,7 @@ def call(self, graph, feat, weight=None):
             if weight is not None:
                 rst = tf.matmul(rst, weight)
 
-            if self._norm != 'none':
+            if self._norm in ['both', 'right']:
                 degs = tf.clip_by_value(tf.cast(graph.in_degrees(), tf.float32),
                                         clip_value_min=1,
                                         clip_value_max=np.inf)
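The Keras-style layer is invoked the same way; again a hypothetical snippet assuming the TensorFlow API in this file:

```python
# Sketch: the same 'left' mode through the TensorFlow GraphConv layer.
import dgl
import tensorflow as tf
from dgl.nn.tensorflow import GraphConv

g = dgl.graph(([0, 0, 1, 2, 3], [1, 2, 2, 3, 0]))
conv = GraphConv(5, 2, norm='left')  # weights are built lazily on first call
feat = tf.ones((4, 5))
out = conv(g, feat)                  # shape (4, 2)
```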
2 changes: 1 addition & 1 deletion tests/mxnet/test_nn.py
@@ -81,7 +81,7 @@ def test_graph_conv(idtype, out_dim):
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree', 'dglgraph']))
-@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
+@pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [False])
 @pytest.mark.parametrize('out_dim', [1, 2])
2 changes: 1 addition & 1 deletion tests/pytorch/test_nn.py
@@ -81,7 +81,7 @@ def test_graph_conv0(out_dim):
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'bipartite'], exclude=['zero-degree', 'dglgraph']))
-@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
+@pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [True, False])
 @pytest.mark.parametrize('out_dim', [1, 2])
2 changes: 1 addition & 1 deletion tests/tensorflow/test_nn.py
@@ -74,7 +74,7 @@ def test_graph_conv(out_dim):
 
 @parametrize_dtype
 @pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree', 'dglgraph']))
-@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
+@pytest.mark.parametrize('norm', ['none', 'both', 'right', 'left'])
 @pytest.mark.parametrize('weight', [True, False])
 @pytest.mark.parametrize('bias', [True, False])
 @pytest.mark.parametrize('out_dim', [1, 2])
