Thanks for the help. I have some more questions about feature dimensions, since my input and hidden features have different dimensions:
1. For link prediction, is it the node features or the edge features that matter here?
2. Is the way I set them in the code below correct, or should they be assigned in another way? (I added a sketch of the alternative I have in mind after the training loop at the end.)
3. Do "input features", "hidden features", and "output features" mean the input, hidden, and output layer sizes?
4. When I run the code, I get this error:
TypeError: new() received an invalid combination of arguments - got (dict, dict), but expected one of:
- (*, torch.device device)
didn't match because some of the arguments have invalid types: (!dict!, !dict!)
- (torch.Storage storage)
- (Tensor other)
- (tuple of ints size, *, torch.device device)
- (object data, *, torch.device device)
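As far as I can tell, the same TypeError shows up if torch.Tensor is called with two dicts instead of integer sizes, so my guess (I am not sure) is that GraphConv builds its weight tensor directly from in_feat and out_feat:

import torch

# My guess at a minimal reproduction (not from the tutorial): torch.Tensor
# expects integer sizes, so passing the per-type dicts raises the same
# "invalid combination of arguments - got (dict, dict)" TypeError.
torch.Tensor({'user_a': 763372}, {'user': 763372})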
And the code:
import dgl
import dgl.function
import dgl.nn
import torch
import torch.nn as nn

class StochasticTwoLayerRGCN(nn.Module):
    def __init__(self, in_feat, hidden_feat, out_feat, rel_names):
        super().__init__()
        self.conv1 = dgl.nn.HeteroGraphConv({
            rel: dgl.nn.GraphConv(in_feat, hidden_feat, norm='right')
            for rel in rel_names
        })
        self.conv2 = dgl.nn.HeteroGraphConv({
            rel: dgl.nn.GraphConv(hidden_feat, out_feat, norm='right')
            for rel in rel_names
        })

    def forward(self, blocks, x):
        x = self.conv1(blocks[0], x)
        x = self.conv2(blocks[1], x)
        return x
class ScorePredictor(nn.Module):
    def forward(self, edge_subgraph, x):
        with edge_subgraph.local_scope():
            edge_subgraph.ndata['x'] = x
            for etype in edge_subgraph.canonical_etypes:
                edge_subgraph.apply_edges(
                    dgl.function.u_dot_v('x', 'x', 'score'), etype=etype)
            return edge_subgraph.edata['score']
class Model(nn.Module):
    def __init__(self, in_features, hidden_features, out_features, etypes):
        super().__init__()
        self.rgcn = StochasticTwoLayerRGCN(
            in_features, hidden_features, out_features, etypes)
        self.pred = ScorePredictor()

    def forward(self, positive_graph, negative_graph, blocks, x):
        x = self.rgcn(blocks, x)
        pos_score = self.pred(positive_graph, x)
        neg_score = self.pred(negative_graph, x)
        return pos_score, neg_score
model = Model(in_features={'user_a': 763372, 'user_a_con': 6, 'user_a_cell': 800060,
                           'user_b': 763372, 'user_b_cell': 800060, 'user_b_con': 5},
              hidden_features={'user': 763372, 'target': 168, 'protein': 17173,
                               'meet': 62246, 'gene': 167, 'cell': 800060,
                               'con': 6},
              out_features=256, etypes=load_data.canonical_etypes)
model = model.cuda()
opt = torch.optim.Adam(model.parameters())

for input_nodes, positive_graph, negative_graph, blocks in dataloader:
    blocks = [b.to(torch.device('cuda')) for b in blocks]
    positive_graph = positive_graph.to(torch.device('cuda'))
    negative_graph = negative_graph.to(torch.device('cuda'))
    input_features = blocks[0].srcdata['features']
    pos_score, neg_score = model(positive_graph, negative_graph, blocks, input_features)
    loss = compute_loss(pos_score, neg_score)
    opt.zero_grad()
    loss.backward()
    opt.step()
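For question 2, here is the alternative I had in mind. It is only a sketch of my understanding: I am assuming dgl.nn.GraphConv expects plain integer in_feat / out_feat sizes, and that features with different dimensions per node type first need a per-type linear projection to a common size. The FeatureProjector class and the size 256 are my own guesses, not from the tutorial. Is this the right direction?

import torch
import torch.nn as nn

# Hypothetical helper (my assumption, not from the tutorial): project each
# node type's raw features to one common dimension before the RGCN.
class FeatureProjector(nn.Module):
    def __init__(self, in_dims, common_feat):
        super().__init__()
        # in_dims: dict mapping node type -> raw feature dimension
        self.proj = nn.ModuleDict({
            ntype: nn.Linear(dim, common_feat) for ntype, dim in in_dims.items()
        })

    def forward(self, x):
        # x: dict mapping node type -> feature tensor
        return {ntype: self.proj[ntype](feat) for ntype, feat in x.items()}

in_dims = {'user_a': 763372, 'user_a_con': 6, 'user_a_cell': 800060,
           'user_b': 763372, 'user_b_cell': 800060, 'user_b_con': 5}
projector = FeatureProjector(in_dims, common_feat=256).cuda()

# The model then receives plain integers for its layer sizes.
model = Model(in_features=256, hidden_features=256, out_features=256,
              etypes=load_data.canonical_etypes).cuda()
opt = torch.optim.Adam(list(projector.parameters()) + list(model.parameters()))

# Inside the loop, the raw features would be projected first:
#     input_features = projector(blocks[0].srcdata['features'])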