Hi,
I’m wondering whether I can use a 2D parameter variable in PyTorch as the input graph of a DGL model. The code looks like this:
import torch
import torch.nn as nn
import torch.nn.functional as F
import networkx as nx
from dgl import DGLGraph
from gat import GAT  # the GAT model from DGL's GAT example

class Encoder(nn.Module):
    def __init__(self, input_size: int, hidden_size: int, T: int):
        """
        input_size: number of underlying factors (81), one node per factor
        T: number of time steps (10)
        hidden_size: dimension of the hidden state
        """
        super(Encoder, self).__init__()
        self.num_of_node = input_size
        # learnable 2D parameter used as the adjacency matrix
        self.W = nn.Parameter(torch.randn(input_size, input_size), requires_grad=True)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # build the graph from the (detached) adjacency matrix
        nx_graph = nx.from_numpy_matrix(self.W.detach().cpu().numpy())
        self.gs = DGLGraph(nx_graph)
        # define a GAT model: 8 attention heads in the hidden layer, 1 in the output layer
        heads = ([8] * 1) + [1]
        self.gat = GAT(self.gs,
                       num_layers=1,
                       in_dim=(T - 1),
                       num_hidden=hidden_size,
                       num_classes=1,
                       heads=heads,
                       activation=F.elu,
                       feat_drop=0.6,
                       attn_drop=0.6,
                       negative_slope=0.2,
                       residual=False)
        self.gat.cuda()

    def forward(self, input_data):
        # input_data: (batch_size, num_of_node, feature_dim)
        input_weighted = torch.zeros(input_data.size(0), self.num_of_node).cuda()
        for i in range(input_data.shape[0]):
            # run the GAT on each sample and keep the first output column per node
            input_weighted[i] = self.gat(input_data[i])[:, 0]
        return input_weighted
The output is then used further downstream to calculate a loss. Although the code does not yield any error, it seems to take up a lot of GPU memory, and I’m not sure where it goes wrong.
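For context, here is roughly how I drive the encoder and how I check the memory usage. This is only a minimal sketch: the batch size, hidden_size, targets, optimizer and learning rate below are hypothetical placeholders, not my exact training code.

import torch
import torch.nn as nn

encoder = Encoder(input_size=81, hidden_size=64, T=10).cuda()
optimizer = torch.optim.Adam(encoder.parameters(), lr=1e-3)
criterion = nn.MSELoss()  # placeholder loss

# hypothetical batch: (batch_size, num_of_node, T - 1)
input_data = torch.randn(128, 81, 9).cuda()
target = torch.randn(128, 81).cuda()  # hypothetical targets

optimizer.zero_grad()
output = encoder(input_data)          # (batch_size, num_of_node)
loss = criterion(output, target)
loss.backward()
optimizer.step()

# report how much GPU memory the step used
print(torch.cuda.memory_allocated() / 1024 ** 2, "MB currently allocated")
print(torch.cuda.max_memory_allocated() / 1024 ** 2, "MB peak")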
Thanks in advance!
Shanchao Yang