Hello, thank you for your interest! I will try to give you all the elements that you need to understand the situation.
My timing experiments give the following results:
- A GCNLayer.forward takes on average 0.7s, against 47s for the AnisotropicGCN
- The functions gcn_msg, gcn_reduce, NodeApplyModule.forward and pop (to retrieve the results) are all very fast
- Of the 47 s spent in AnisotropicGCN.forward, 45 s are spent inside the recv function.
Other observations:
- I tried two different ways to provide the list of edges: passing a Python list of pairs of integers, and converting that list to a 1-d torch.LongTensor using the graph.edgeid method. I did not see a big difference in the runtime.
- I don’t have edge data.
- Although my model is on the GPU, it seems that the load of the GPU is very light, almost everything happens on the CPU.
Here is my file to define the layers: the anisotropic method is a very light modification to the standard GCN that I copied:
From https://github.com/dmlc/dgl/blob/master/examples/pytorch/gcn/gcn.py
import torch
import torch.nn as nn
import math
import dgl
import time
def anisotropic_gcn_msg(edge):
    """Edge message function for the anisotropic layer.

    Scales the per-dimension projected source features (stored under 'temp')
    by the source node's normalization factor and ships the product as 'm'.
    """
    return {'m': edge.src['temp'] * edge.src['norm']}
def gcn_msg(edge):
    """Edge message function for the standard GCN layer.

    Scales the projected source features ('h') by the source node's
    normalization factor and ships the product as 'm'.
    """
    return {'m': edge.src['h'] * edge.src['norm']}
def gcn_reduce(node):
    """Reduce function: aggregate incoming messages into a new 'h'.

    Sums the mailbox messages over the message axis (dim 1), then applies
    the destination node's normalization factor.
    """
    summed = node.mailbox['m'].sum(dim=1)
    return {'h': summed * node.data['norm']}
class NodeApplyModule(nn.Module):
    """Per-node update: optionally add a learnable bias to 'h', then apply
    an optional activation, and return the updated 'h' feature."""

    def __init__(self, out_feats, activation=None, bias=True):
        super(NodeApplyModule, self).__init__()
        # Bias is a learnable vector of size out_feats, or None when disabled.
        self.bias = nn.Parameter(torch.Tensor(out_feats)) if bias else None
        self.activation = activation
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize the bias uniformly in [-1/sqrt(n), 1/sqrt(n)]."""
        if self.bias is None:
            return
        bound = 1. / math.sqrt(self.bias.size(0))
        self.bias.data.uniform_(-bound, bound)

    def forward(self, nodes):
        """Apply bias and activation to the batched node feature 'h'."""
        feats = nodes.data['h']
        if self.bias is not None:
            feats = feats + self.bias
        if self.activation:
            feats = self.activation(feats)
        return {'h': feats}
class GCNLayer(nn.Module):
    """Standard GCN layer: linear projection of node features followed by a
    normalized message-passing step over the graph `g`."""

    def __init__(self, g, in_feats, out_feats, activation, bias=True):
        super().__init__()
        self.g = g
        # Dense projection matrix applied to node features before propagation.
        self.weight = nn.Parameter(torch.Tensor(in_feats, out_feats))
        self.node_update = NodeApplyModule(out_feats, activation, bias)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_feats), 1/sqrt(out_feats)]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)

    def forward(self, h):
        """Project `h`, propagate over the graph, and return the new features."""
        projected = torch.mm(h, self.weight)
        self.g.ndata['h'] = projected
        self.g.update_all(gcn_msg, gcn_reduce, self.node_update)
        return self.g.ndata.pop('h')
class AnisotropicGCNLayer(nn.Module):
    """GCN variant with one weight slice and one edge subset per dimension:
    each dimension's projection is propagated only along that dimension's
    edges via explicit send/recv calls."""

    def __init__(self, g, edge_lists: list, dimensions: int, in_feats: int, out_feats: int,
                 activation, bias=True):
        """ g (DGL directed graph)
        edge_lists: one collection of edges per dimension; edges in entry d
            carry messages for weight slice d
        in_feats: number of input channels
        out_feats: number of output units
        activation: usually F.relu or None
        dimensions: number of dimensions in the cartesian product
        bias: if True, add bias to the linear layer"""
        super().__init__()
        self.g = g
        self.dimensions = dimensions
        self.list_edges = edge_lists
        # One (in_feats, out_feats) projection per dimension, stacked on dim 2.
        self.weight = nn.Parameter(torch.Tensor(in_feats, out_feats, dimensions))
        self.node_update = NodeApplyModule(out_feats, activation, bias)
        self.reset_parameters()

    def reset_parameters(self):
        # Same init scheme as GCNLayer: uniform in [-1/sqrt(out_feats), 1/sqrt(out_feats)].
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, h):
        # Per dimension: project the input with that dimension's weight slice,
        # send messages along that dimension's edges only, then reduce + apply.
        for dim in range(self.dimensions):
            self.g.ndata['temp'] = torch.mm(h, self.weight[:, :, dim])
            self.g.send(self.list_edges[dim], anisotropic_gcn_msg)
            # NOTE(review): this recv() is the reported 45 s hot spot. With
            # Python UDFs, recv falls back to degree bucketing executed on the
            # CPU, which matches the observed light GPU load. Presumably
            # g.send_and_recv(self.list_edges[dim], ...) — or builtin
            # message/reduce functions (dgl.function.copy_src / dgl.function.sum)
            # — would avoid this; verify against the installed DGL version.
            self.g.recv(reduce_func=gcn_reduce, apply_node_func=self.node_update)
            self.g.ndata.pop('temp')
        # NOTE(review): each loop iteration overwrites ndata['h'] (the reduce
        # writes 'h'), so after the loop only the last dimension's result is
        # popped, and `h` is not re-fed between dimensions — confirm this is
        # the intended way of combining the per-dimension outputs.
        h = self.g.ndata.pop('h')
        return h
I also uncommented the profiling/debug print line in DGL's runtime.py, but I don't know how to interpret the result. Here is what I obtain on one call to AnisotropicGCN.forward():
FeatDict _z6 = READ_ROW(_z0, _z3)
FeatDict _z7 = READ_ROW(_z0, _z4)
FeatDict _z8 = READ_ROW(_z1, _z5)
FeatDict _z10 = EDGE_UDF(_z9, _z6, _z8, z7)
APPEND_ROW(_z2, _z10)
Send took 0.485 s
FeatDict _z6 = READ_ROW(_z0, _z3)
FeatDict _z7 = READ_ROW(_z0, _z4)
FeatDict _z8 = READ_ROW(_z1, _z5)
FeatDict _z10 = EDGE_UDF(_z9, _z6, _z8, z7)
APPEND_ROW(_z2, _z10)
Send took 0.332 s
FeatDict _z6 = READ_ROW(_z0, _z3)
FeatDict _z7 = READ_ROW(_z0, _z4)
FeatDict _z8 = READ_ROW(_z1, _z5)
FeatDict _z10 = EDGE_UDF(_z9, _z6, _z8, z7)
APPEND_ROW(_z2, _z10)
Send took 0.361 s
FeatDict _z4 = READ_ROW(nf, _z1)
FeatDict _z5 = READ_ROW(msg, _z2)
FeatDict _z4 = NODE_UDF(_z3, _z4, _z5)
FeatDict _z9 = READ_ROW(nf, _z6)
FeatDict _z10 = READ_ROW(msg, _z7)
FeatDict _z9 = NODE_UDF(_z8, _z9, _z10)
FeatDict _z14 = READ_ROW(nf, _z11)
FeatDict _z15 = READ_ROW(msg, _z12)
FeatDict _z14 = NODE_UDF(_z13, _z14, _z15)
FeatDict _z19 = READ_ROW(nf, _z16)
FeatDict _z20 = READ_ROW(msg, _z17)
FeatDict _z19 = NODE_UDF(_z18, _z19, _z20)
FeatDict _z22 = MERGE_ROW(_z21, _z4, _z9, _z14, z19)
WRITE_DICT(_z0, _z22)
FeatDict _z23 = READ_ROW(nf, recv_nodes)
FeatDict _z24 = UPDATE_DICT(_z23, _z0)
FeatDict _z26 = NODE_UDF(_z25, _z24)
FeatDict _z27 = UPDATE_DICT(_z0, z26)
WRITE_ROW(nf, recv_nodes, _z27)
Recv took 45.900 s
Pop took 0.000 s
AnisotropicGCN took 47.081 s
Do these elements help you?