import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
# Assumed imports: JumpingKnowledge, SortPooling and edge_softmax come from DGL's PyTorch backend.
from dgl.nn.pytorch import JumpingKnowledge, SortPooling
from dgl.nn.functional import edge_softmax

class Classifier(nn.Module):
    def __init__(self, in_dim=1, hidden_dim=1024, n_classes=2):
        super(Classifier, self).__init__()
        self.conv1 = WSGConv(in_dim, hidden_dim, activation=F.relu)
        self.conv2 = WSGConv(hidden_dim, hidden_dim, activation=F.relu)
        self.conv3 = WSGConv(hidden_dim, 512, activation=F.relu)
        self.jk = JumpingKnowledge()          # concatenates the outputs of the three conv layers
        self.sortpool = SortPooling(k=10)     # keeps the top-10 nodes per graph -> 10 * 2560 = 25600 features
        self.conv1D_1 = nn.Conv1d(in_channels=1, out_channels=256, kernel_size=2560, stride=2560)
        self.maxpool = nn.MaxPool1d(2)
        self.conv1D_2 = nn.Conv1d(in_channels=256, out_channels=512, kernel_size=2, stride=1)
        self.fc1 = nn.Linear(4 * 512, 1024)
        self.classify = nn.Linear(1024, n_classes)
    def forward(self, g):
        # Use node in-degrees as the initial 1-dimensional node feature.
        h = g.in_degrees().view(-1, 1).float()
        h1 = self.conv1(g, h)
        h1 = h1.flatten(1)
        h2 = self.conv2(g, h1)
        h2 = h2.flatten(1)
        h3 = self.conv3(g, h2)
        h3 = h3.flatten(1)
        h = self.jk([h1, h2, h3])            # (N, 1024 + 1024 + 512) = (N, 2560)
        h = self.sortpool(g, h)              # (batch_size, 10 * 2560)
        h = h.view(-1, 1, 25600)
        h = self.conv1D_1(h)
        h = self.maxpool(h)
        h = self.conv1D_2(h)
        h = h.flatten(1)                     # (batch_size, 4 * 512)
        h = F.relu(self.fc1(h))
        h = F.dropout(h, p=0.25, training=self.training)
        return self.classify(h)
class WSGConv(nn.Module):
    def __init__(self, in_feats, out_feats, bias=True, activation=None):
        super(WSGConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self.activation = activation
        self.fc_layer = nn.Linear(self._in_feats * 3, out_feats, bias=True)
        # Learnable scalar coefficients for the self, positive-neighbour and negative-neighbour terms.
        self.coef_self = nn.Parameter(torch.FloatTensor([1]))
        self.coef_posi = nn.Parameter(torch.FloatTensor([1]))
        self.coef_nega = nn.Parameter(torch.FloatTensor([1]))
        if bias:
            self.bias = nn.Parameter(torch.zeros(self._out_feats))
        else:
            self.register_buffer("bias", None)
        self.reset_parameters()

    def reset_parameters(self):
        gain = nn.init.calculate_gain("relu")
        nn.init.xavier_uniform_(self.fc_layer.weight, gain=gain)
    def forward(self, graph, feat):
        with graph.local_scope():
            # Split the weighted signed graph into a positive and a negative signed graph
            h_self = feat
            graph.ndata['h'] = h_self
            posiedges_index = torch.nonzero(graph.edata['w'] > 0).squeeze()
            negaedges_index = torch.nonzero(graph.edata['w'] < 0).squeeze()
            g_positive = dgl.graph((graph.edges()[0], graph.edges()[1]))
            g_positive.ndata['h'] = graph.ndata['h']
            g_positive.edata['w'] = graph.edata['w']
            g_positive.remove_edges(negaedges_index)
            g_negative = dgl.graph((graph.edges()[0], graph.edges()[1]))
            g_negative.ndata['h'] = graph.ndata['h']
            g_negative.edata['w'] = abs(graph.edata['w'])
            g_negative.remove_edges(posiedges_index)
            g_positive.edata['w'] = edge_softmax(g_positive, g_positive.edata['w'])
            g_negative.edata['w'] = edge_softmax(g_negative, g_negative.edata['w'])
            # Message passing
            g_positive.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h'))
            g_negative.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h'))
            graph.ndata['h'] = self.coef_self * graph.ndata['h']
            g_positive.ndata['h'] = self.coef_posi * g_positive.ndata['h']
            g_negative.ndata['h'] = self.coef_nega * g_negative.ndata['h']
            feat_box = [graph.ndata['h'], g_positive.ndata['h'], g_negative.ndata['h']]
            feat_combined = torch.cat(feat_box, dim=1)
            degs = (graph.in_degrees() + 1.0).unsqueeze(1)
            h_new = torch.div(feat_combined, degs)
            rst = self.fc_layer(h_new)
            # bias term
            if self.bias is not None:
                rst = rst + self.bias
            # activation
            if self.activation is not None:
                rst = self.activation(rst)
            return rst
The WSGConv layer is my own message-passing method for weighted signed graphs.
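For context, here is a minimal, self-contained sketch of how the layer is driven. The toy graph, its edge weights and the output size are made up purely for illustration; the only real requirement is a 1-D edata['w'] containing both positive and negative weights:

# Toy weighted signed graph with 4 nodes and 4 edges (illustrative values only).
src = torch.tensor([0, 1, 2, 3])
dst = torch.tensor([1, 2, 3, 0])
g = dgl.graph((src, dst))
g.edata['w'] = torch.tensor([0.8, -0.5, 1.2, -0.3])  # mixed positive/negative edge weights

feat = g.in_degrees().view(-1, 1).float()             # same initial feature as Classifier.forward
layer = WSGConv(in_feats=1, out_feats=8, activation=F.relu)
out = layer(g, feat)
print(out.shape)                                       # torch.Size([4, 8])

The training loop that produces the error is: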
for epoch in range(300):
    epoch_loss = 0
    for iter, (batchg, label) in enumerate(train_loader):
        batchg, label = batchg.to(DEVICE), label.to(DEVICE)
        predict = model(batchg)
        loss = loss_func(predict, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
The following is the error message I get when loss.backward() runs:
RuntimeError Traceback (most recent call last)
Cell In[4], line 38
36 loss = loss_func(predict, label)
37 optimizer.zero_grad()
---> 38 loss.backward()
39 optimizer.step()
40 epoch_loss += loss.detach().item()
File D:\Anaconda3\envs\Graph_Network\lib\site-packages\torch\_tensor.py:396, in Tensor.backward(self, gradient, retain_graph, create_graph, inputs)
387 if has_torch_function_unary(self):
388 return handle_torch_function(
389 Tensor.backward,
390 (self,),
(...)
394 create_graph=create_graph,
395 inputs=inputs)
--> 396 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
File D:\Anaconda3\envs\Graph_Network\lib\site-packages\torch\autograd\__init__.py:173, in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
168 retain_graph = create_graph
170 # The reason we repeat same the comment below is that
171 # some Python versions print out the first line of a multi-line function
172 # calls in the traceback and some print out the last line
--> 173 Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
174 tensors, grad_tensors_, retain_graph, create_graph, inputs,
175 allow_unreachable=True, accumulate_grad=True)
RuntimeError: CUDA error: device-side assert triggered
CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
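For reference, the debugging step suggested in the last line looks like the sketch below. The environment variable has to be set before any CUDA work is done, and the CPU fallback is only a debugging assumption, not part of the model:

import os
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"   # set before torch touches the GPU, so kernel errors are reported at the failing call

import torch
# Alternative: run one batch on the CPU, where device-side asserts usually
# surface as ordinary Python exceptions with a clearer message.
DEVICE = torch.device("cpu")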