Here is what I ended up doing once I learned that a dataset can simply be a list of tuples [(g0, l0), (g1, l1), ...]
passed into torch's DataLoader.
It runs to completion, but I am not sure whether everything is working as intended.
# ----- Dataset construction -----
# Each sample is a (graph, label) tuple; a plain list of such tuples is
# exactly what torch's DataLoader accepts (with a custom collate_fn).
labels = [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
# simplified version
graphs = [G0, G1, G2, G3, G4, G5, G6, G7, G8, G9]
# could fetch with numpy.unique if needed
num_classes = 2
graph_count = len(graphs)

# Pair each graph with its label; zip replaces the manual index loop.
samples_train = list(zip(graphs, labels))

import random

# Shuffle before splitting so the held-out split is not all one class.
# NOTE(review): no seed is set, so the split differs between runs.
random.shuffle(samples_train)

# The first 30% of the shuffled samples become the TEST set. The original
# name 'train_split_pct' was misleading: 0.3 is the held-out fraction.
test_split_pct = 0.3
test_split_index = round(graph_count * test_split_pct)
samples_test = samples_train[:test_split_index]
del samples_train[:test_split_index]
import dgl
import torch
def collate(samples):
    """Collate a list of (graph, label) pairs for the DataLoader.

    Returns a single dgl-batched graph and a 1-D label tensor.
    """
    graph_list = [pair[0] for pair in samples]
    label_list = [pair[1] for pair in samples]
    return dgl.batch(graph_list), torch.tensor(label_list)
from dgl.nn.pytorch import GraphConv
import torch.nn as nn
import torch.nn.functional as F
class Classifier(nn.Module):
    """Two-layer GCN graph classifier.

    Node features are initialized from in-degrees, passed through two
    GraphConv + ReLU layers, mean-pooled into one vector per graph, and
    linearly projected to class logits.
    """

    def __init__(self, in_dim, hidden_dim, n_classes):
        super(Classifier, self).__init__()
        self.conv1 = GraphConv(in_dim, hidden_dim)
        self.conv2 = GraphConv(hidden_dim, hidden_dim)
        self.classify = nn.Linear(hidden_dim, n_classes)

    def forward(self, g):
        # Node degree is the initial scalar feature; for undirected
        # graphs the in-degree equals the out-degree.
        feat = g.in_degrees().view(-1, 1).float()
        # Two rounds of graph convolution + activation.
        for conv in (self.conv1, self.conv2):
            feat = F.relu(conv(g, feat))
        g.ndata['h'] = feat
        # Graph representation = mean over its node representations.
        pooled = dgl.mean_nodes(g, 'h')
        return self.classify(pooled)
import torch.optim as optim
from torch.utils.data import DataLoader
# Wrap the training samples in PyTorch's DataLoader, using the custom
# collate function so each minibatch arrives as (batched_graph, labels).
# NOTE(review): despite its name, dataset_train is a DataLoader, not a
# Dataset. A test-set loader could be built the same way from
# samples_test, but evaluation below batches the whole test set at once.
dataset_train = DataLoader(samples_train, batch_size=32, shuffle=True,
                           collate_fn=collate)
# Model / optimizer setup: input feature dim is 1 (scalar node degree),
# hidden width 256, output one logit per class.
model = Classifier(1, 256, num_classes)
loss_func = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-2)  # bumped from 0.001
model.train()

# Training schedule and per-epoch loss history for plotting later.
epochs = 50
epoch_losses = []
# ----- Training loop -----
# Fix: matplotlib was never imported anywhere in the script, so the
# plotting calls below raised NameError on 'plt'.
import matplotlib.pyplot as plt

for epoch in range(epochs):
    epoch_loss = 0.0
    batch_count = 0  # renamed from 'iter', which shadowed the builtin
    for bg, label_batch in dataset_train:
        prediction = model(bg)
        loss = loss_func(prediction, label_batch)
        # Standard step: clear grads, backprop, update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
        batch_count += 1
    # Average the summed minibatch losses; max() guards an empty loader
    # (the original divided by (iter + 1) and would crash on one).
    epoch_loss /= max(batch_count, 1)
    print('Epoch {}, loss {:.4f}'.format(epoch, epoch_loss))
    epoch_losses.append(epoch_loss)

plt.title('cross entropy averaged over minibatches')
plt.plot(epoch_losses)
plt.show()
# ----- Evaluation -----
model.eval()
# Convert the list of (graph, label) tuples into two parallel lists:
# X = graphs, Y = labels.
test_X, test_Y = map(list, zip(*samples_test))
test_bg = dgl.batch(test_X)
test_Y = torch.tensor(test_Y).float().view(-1, 1)
# Disable autograd during inference — no gradients are needed here.
with torch.no_grad():
    probs_Y = torch.softmax(model(test_bg), 1)
# Two ways of turning class probabilities into predictions:
sampled_Y = torch.multinomial(probs_Y, 1)        # sample from the distribution
argmax_Y = torch.max(probs_Y, 1)[1].view(-1, 1)  # pick the most likely class
print('Accuracy of sampled predictions on the test set: {:.4f}%'.format(
    (test_Y == sampled_Y.float()).sum().item() / len(test_Y) * 100))
# Fix: format spec was '{:4f}' (width 4, default 6-digit precision);
# now '{:.4f}' to match the sampled-accuracy line above.
print('Accuracy of argmax predictions on the test set: {:.4f}%'.format(
    (test_Y == argmax_Y.float()).sum().item() / len(test_Y) * 100))