Exploring latent space

Hi, I'm not sure if this is more of a PyTorch question, but in a trained network defined as:

import dgl
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn import GraphConv


class Classifier(nn.Module):
    def __init__(self, in_dim, hidden_dim_graph, hidden_dim1, n_classes, dropout):
        super(Classifier, self).__init__()
        self.conv1 = GraphConv(in_dim, hidden_dim_graph)
        self.conv2 = GraphConv(hidden_dim_graph, hidden_dim1)
        self.classify = nn.Sequential(nn.Linear(hidden_dim1, hidden_dim1), nn.Dropout(dropout))
        self.classify2 = nn.Sequential(nn.Linear(hidden_dim1, n_classes), nn.Dropout(dropout))
        self.out_act = nn.Sigmoid()

    def forward(self, g):
        h = g.ndata['h_n'].float()
        h = F.relu(self.conv1(g, h))
        h = F.relu(self.conv2(g, h))
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')  # graph-level embedding (mean over node features)
        a2 = self.classify(hg)
        a3 = self.classify2(a2)
        return self.out_act(a3)

I'd like to explore the hg variable, but I am unsure how to retrieve it. Is there any way to do so? Thanks in advance!

You can simply return hg along with self.out_act(a3).
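
For example, the forward pass could end like this (just a sketch of the suggestion above, keeping the rest of the class unchanged):

def forward(self, g):
    h = g.ndata['h_n'].float()
    h = F.relu(self.conv1(g, h))
    h = F.relu(self.conv2(g, h))
    g.ndata['h'] = h
    hg = dgl.mean_nodes(g, 'h')
    a2 = self.classify(hg)
    a3 = self.classify2(a2)
    return self.out_act(a3), hg  # second output is the pooled graph embedding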

Thanks so much for this! I am actually training on top of another library that doesn't work if the forward function returns two outputs 🙁

I was thinking of saving the weights after training and then using a second method called "retrieve" inside the classifier class. Would you say this is correct?

class GCN2_mod(nn.Module):
    def __init__(self, in_dim, hidden_dim_graph, hidden_dim1, n_classes, dropout):
        super(GCN2_mod, self).__init__()
        self.conv1 = GraphConv(in_dim, hidden_dim_graph)
        self.conv2 = GraphConv(hidden_dim_graph, hidden_dim1)
        self.classify = nn.Sequential(nn.Linear(hidden_dim1, hidden_dim1), nn.Dropout(dropout))
        self.classify2 = nn.Sequential(nn.Linear(hidden_dim1, n_classes), nn.Dropout(dropout))
        self.out_act = nn.Sigmoid()

    def forward(self, g):
        in_feat = g.ndata['h_n'].float()
        h = F.relu(self.conv1(g, in_feat))
        h = F.relu(self.conv2(g, h))
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')
        a2 = self.classify(hg)
        a3 = self.classify2(a2)
        return self.out_act(a3)

    def retrieve(self, g):
        # same computation as forward, but stops at the pooled graph embedding
        in_feat = g.ndata['h_n'].float()
        h = F.relu(self.conv1(g, in_feat))
        h = F.relu(self.conv2(g, h))
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')
        return hg
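
After training, something like this should work to load the saved weights back and pull out hg (a rough sketch; the file name and the constructor arguments are just placeholders for whatever you used during training):

import torch

# save once training is done
torch.save(model.state_dict(), 'gcn2_mod.pt')

# later: rebuild the model, load the weights, and extract the embedding
model = GCN2_mod(in_dim, hidden_dim_graph, hidden_dim1, n_classes, dropout)
model.load_state_dict(torch.load('gcn2_mod.pt'))
model.eval()
with torch.no_grad():
    hg = model.retrieve(g)  # graph-level embedding for graph g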
