Hi! I've recently started with geometric deep learning and am finding my way around DGL; the community is awesome.
I want to use edge weights with GAT; so far I've only used node features. Here's the code:
```python
import dgl
import dgl.function as fn
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.functional import edge_softmax  # older DGL: from dgl.nn.pytorch import edge_softmax


class GATLayer(nn.Module):
    def __init__(self,
                 in_dim,
                 out_dim,
                 num_heads,
                 feat_drop=0.,
                 attn_drop=0.,
                 alpha=0.2,
                 agg_activation=F.elu):
        super(GATLayer, self).__init__()
        self.num_heads = num_heads
        self.feat_drop = nn.Dropout(feat_drop)
        self.fc = nn.Linear(in_dim, num_heads * out_dim, bias=False)
        # per-head attention vectors for source and destination nodes
        self.attn_l = nn.Parameter(torch.Tensor(size=(num_heads, out_dim, 1)))
        self.attn_r = nn.Parameter(torch.Tensor(size=(num_heads, out_dim, 1)))
        nn.init.xavier_normal_(self.attn_l, gain=1.414)
        nn.init.xavier_normal_(self.attn_r, gain=1.414)
        self.attn_drop = nn.Dropout(attn_drop)
        self.activation = nn.LeakyReLU(alpha)
        self.softmax = edge_softmax
        self.agg_activation = agg_activation

    def clean_data(self):
        ndata_names = ['ft', 'a1', 'a2']
        edata_names = ['a_drop']
        for name in ndata_names:
            self.g.ndata.pop(name)
        for name in edata_names:
            self.g.edata.pop(name)

    def forward(self, bg, features):
        self.g = bg
        # h = self.feat_drop(features)
        h = features
        # project, then split into heads: (N, num_heads, out_dim)
        ft = self.fc(h).reshape((h.shape[0], self.num_heads, -1))
        head_ft = ft.transpose(0, 1)                          # (num_heads, N, out_dim)
        a1 = torch.bmm(head_ft, self.attn_l).transpose(0, 1)  # (N, num_heads, 1)
        a2 = torch.bmm(head_ft, self.attn_r).transpose(0, 1)  # (N, num_heads, 1)
        self.g.ndata.update({'ft': ft, 'a1': a1, 'a2': a2})
        self.g.apply_edges(self.edge_attention)
        self.edge_softmax()
        # attention-weighted aggregation (u_mul_e replaces the deprecated src_mul_edge)
        self.g.update_all(fn.u_mul_e('ft', 'a_drop', 'ft'), fn.sum('ft', 'ft'))
        ret = self.g.ndata['ft']
        ret = ret.flatten(1)  # concatenate the heads
        if self.agg_activation is not None:
            ret = self.agg_activation(ret)
        self.clean_data()
        return ret

    def edge_attention(self, edges):
        # unnormalized attention logit per edge
        a = self.activation(edges.src['a1'] + edges.dst['a2'])
        return {'a': a}

    def edge_softmax(self):
        attention = self.softmax(self.g, self.g.edata.pop('a'))
        self.g.edata['a_drop'] = self.attn_drop(attention)


class GATClassifier(nn.Module):
    def __init__(self, in_dim, hidden_dim, num_heads, n_classes):
        super(GATClassifier, self).__init__()
        self.layers = nn.ModuleList()
        self.layers.append(GATLayer(in_dim, hidden_dim, num_heads))
        self.layers.append(GATLayer(hidden_dim * num_heads, hidden_dim, num_heads))
        self.classify = nn.Linear(hidden_dim * num_heads, n_classes)

    def forward(self, bg, features):
        h = features
        for i, gnn in enumerate(self.layers):
            h = gnn(bg, h)
        bg.ndata['h'] = h
        hg = dgl.mean_nodes(bg, 'h')  # graph-level mean readout
        return self.classify(hg)
```
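In case it helps, this is roughly how I call it; the graph sizes and feature dimensions below are just dummy values:

```python
# dummy batched-graph usage, only to show the shapes involved
g1 = dgl.rand_graph(10, 30)   # 10 nodes, 30 random edges
g2 = dgl.rand_graph(8, 20)
bg = dgl.batch([g1, g2])
feats = torch.randn(bg.num_nodes(), 16)

model = GATClassifier(in_dim=16, hidden_dim=32, num_heads=4, n_classes=3)
logits = model(bg, feats)     # (2, 3): one row of class scores per graph
```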
I’ve used weighted edges with GCN using this update function:
```python
g.update_all(fn.u_mul_e(lhs_field='h', rhs_field='w', out='m'), fn.sum('m', 'h_neigh'))
```
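where `'w'` is a per-edge scalar I attach to the graph beforehand; a minimal sketch of what I mean (the sizes and the `'w'` key are just placeholders):

```python
# toy graph with one scalar weight per edge
g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
g.ndata['h'] = torch.randn(3, 16)
g.edata['w'] = torch.rand(3, 1)   # broadcast over the feature dimension
g.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h_neigh'))
```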
Is there a similar way for GATs, or anything else that can help me include edge weights?
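The closest thing I could come up with myself is rescaling the normalized attention by the edge weight inside `edge_softmax`, assuming the weights live in `bg.edata['w']` (that key is just my own convention), but I'm not sure whether this is sound or whether the weight should go in before the softmax instead:

```python
# rough idea only: self.g.edata['w'] is assumed to hold a (num_edges, 1) weight
def edge_softmax(self):
    attention = self.softmax(self.g, self.g.edata.pop('a'))   # (E, num_heads, 1)
    w = self.g.edata['w'].view(-1, 1, 1)                      # broadcast over heads
    self.g.edata['a_drop'] = self.attn_drop(attention * w)
```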