How to add node features into the RGCN tutorial?

As we all know, the RGCN tutorial only considers the relations of the graph. However, we want to add node features and feed them into the message passing process. I find it really hard to add this to message_func and apply_func because of my shallow understanding of them.

Could anyone tell me how to do that? Thanks and best regards.

You can follow the example here to see how to get node features from the dataloader; that example uses a homogeneous graph. Getting node data for a heterogeneous graph is similar: you can follow the Python documentation here to see how to set prefetch_node_feats for RGCN.
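
For a heterogeneous graph, a minimal sketch might look like the following (assuming a heterograph g whose features are stored under 'h'; the 'paper' seed nodes are hypothetical, so adjust the names to your data):

import dgl
import torch as th

# prefetch_node_feats takes a dict mapping each node type to the feature
# names that should be prefetched onto the sampled blocks.
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(
    2, prefetch_node_feats={ntype: ["h"] for ntype in g.ntypes}
)
train_nids = {"paper": th.arange(100)}  # hypothetical seed nodes
dataloader = dgl.dataloading.DataLoader(
    g, train_nids, sampler, batch_size=32, shuffle=True
)
for input_nodes, output_nodes, blocks in dataloader:
    # The prefetched features land on the first block's source nodes,
    # keyed by node type.
    inputs = blocks[0].srcdata["h"]  # dict[ntype, Tensor]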

BTW, we have a simpler version of the RGCN example here; it might be easier for you to play with.

@frozenbugs
Actually, I’ve tried to edit the code, changing only the embed_layer() function, because the original code seems to Xavier-initialize a tensor as the feature h when h is None.
1. I added an MLP layer, because my features from the graph have different dimensions. I use the MLP to project them all to the same shape.

import torch as th
import torch.nn as nn


class GraphFeatureExtractor(nn.Module):
    def __init__(self, g, mlp_out):
        super(GraphFeatureExtractor, self).__init__()
        self.g = g
        self.mlp_out = mlp_out
        # One projection MLP per node type, built once in __init__ so the
        # weights are registered and actually trained. (Building them inside
        # forward would re-initialize them on every call.)
        self.mlps = nn.ModuleDict()
        for ntype in g.ntypes:
            feature_size = g.nodes[ntype].data["h"].size(1)
            self.mlps[ntype] = self.mlp(feature_size, mlp_out)

    def extract_features(self, g):
        # Read the raw 'h' features stored on each node type. There is no
        # need to wrap them in nn.Parameter: they are inputs, not weights.
        return {ntype: g.nodes[ntype].data["h"] for ntype in g.ntypes}

    def mlp(self, mlp_in, mlp_out):
        return nn.Sequential(
            nn.Linear(mlp_in, mlp_out),
            nn.ReLU(),
            # nn.Dropout(),
            # nn.Linear(mlp_out, mlp_out)
        )

    def unify_dimensions(self, feat_dict):
        # Project every node type's features to the common mlp_out width.
        return {
            ntype: self.mlps[ntype](feat) for ntype, feat in feat_dict.items()
        }

    def forward(self, g):
        unified = self.unify_dimensions(self.extract_features(g))
        for ntype, tensor in unified.items():
            print(f"{ntype} feature shape: {tensor.shape}")
        return unified
2. Then I deleted the classes EntityClassify_HeteroAPI and RelGraphConvLayerHeteroAPI, because I found the code never runs through these two classes.
3. Finally, I replaced RelGraphEmbed with my own GraphFeatureExtractor to create the h embeddings (a usage sketch follows this list).
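
For reference, the extractor is called like this (a sketch; g stands for my heterograph, whose per-type 'h' features have different widths):

extractor = GraphFeatureExtractor(g, mlp_out=64)
h = extractor(g)  # dict[ntype, Tensor]; every tensor now has width mlp_out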

I also tried to edit input_dim and output_dim, because a 3-layer RGCN's input/output scheme should be:
MLP: (raw feature dim → mlp_out) → layer 1 (i2h): (mlp_out → hidden) → layer 2 (h2h): (hidden → hidden) → layer 3 (h2o): (hidden → number of classes)
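
To make the scheme concrete, here is a minimal shape check with placeholder sizes (plain nn.Linear layers stand in for the per-relation projections):

import torch as th
import torch.nn as nn

mlp_out, h_dim, num_classes = 64, 64, 4
x = th.randn(10, 128)                 # raw features of one node type, width 128
x = nn.Linear(128, mlp_out)(x)        # MLP: 128 -> 64
x = nn.Linear(mlp_out, h_dim)(x)      # i2h: 64 -> 64
x = nn.Linear(h_dim, h_dim)(x)        # h2h: 64 -> 64
x = nn.Linear(h_dim, num_classes)(x)  # h2o: 64 -> 4
print(x.shape)                        # torch.Size([10, 4])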

However, a chain of errors keeps happening. Could you help me debug this code?

"""RGCN layer implementation"""
from collections import defaultdict

import dgl
import dgl.function as fn
import dgl.nn as dglnn

import torch as th
import torch.nn as nn
import torch.nn.functional as F
import tqdm
from extract_features import GraphFeatureExtractor


class RelGraphConvLayer(nn.Module):
    r"""Relational graph convolution layer.

    Parameters
    ----------
    in_feat : int
        Input feature size.
    out_feat : int
        Output feature size.
    rel_names : list[str]
        Relation names.
    num_bases : int, optional
        Number of bases. If is none, use number of relations. Default: None.
    weight : bool, optional
        True if a linear layer is applied after message passing. Default: True
    bias : bool, optional
        True if bias is added. Default: True
    activation : callable, optional
        Activation function. Default: None
    self_loop : bool, optional
        True to include self loop message. Default: False
    dropout : float, optional
        Dropout rate. Default: 0.0
    bias_feat_dim : int, optional
        If set, use this size for the bias instead of out_feat. Default: None
    """

    def __init__(
        self,
        in_feat,
        out_feat,
        rel_names,
        num_bases,
        *,
        weight=True,
        bias=True,
        activation=None,
        self_loop=False,
        dropout=0.0,
        bias_feat_dim=None,
    ):
        super(RelGraphConvLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rel_names = rel_names
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop
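        # One GraphConv per relation; the per-relation weights are supplied
        # at call time through mod_kwargs, so weight=False here.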
        self.conv = dglnn.HeteroGraphConv(
            {
                rel: dglnn.GraphConv(
                    in_feat, out_feat, norm="right", weight=False, bias=False
                )
                for rel in rel_names
            }
        )

        self.use_weight = weight
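        # Use basis decomposition when there are fewer bases than relations.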
        self.use_basis = num_bases < len(self.rel_names) and weight
        if self.use_weight:
            if self.use_basis:
                self.basis = dglnn.WeightBasis(
                    (in_feat, out_feat), num_bases, len(self.rel_names)
                )

            else:
                self.weight = nn.Parameter(
                    th.Tensor(len(self.rel_names), in_feat, out_feat)
                )
                nn.init.xavier_uniform_(
                    self.weight, gain=nn.init.calculate_gain("relu")
                )

        # bias
        if bias:
            bias_dim = bias_feat_dim if bias_feat_dim else out_feat  # use bias_feat_dim if it is set
            self.h_bias = nn.Parameter(th.Tensor(bias_dim))
            nn.init.zeros_(self.h_bias)

        # weight for self loop
        if self.self_loop:
            self.loop_weight = nn.Parameter(th.Tensor(in_feat, out_feat))
            nn.init.xavier_uniform_(
                self.loop_weight, gain=nn.init.calculate_gain("relu")
            )

        self.dropout = nn.Dropout(dropout)

    def forward(self, g, inputs):
        """Forward computation

        Parameters
        ----------
        g : DGLGraph
            Input graph.
        inputs : dict[str, torch.Tensor]
            Node feature for each node type.

        Returns
        -------
        dict[str, torch.Tensor]
            New node features for each node type.
        """
        for ntype, tensor in inputs.items():
            print(f"Input {ntype} feature shape: {tensor.shape}")
        g = g.local_var()
        if self.use_weight:
            weight = self.basis() if self.use_basis else self.weight
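            # Split the stacked (num_rels, in_feat, out_feat) weights into a
            # per-relation {"weight": ...} dict for HeteroGraphConv.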
            wdict = {
                self.rel_names[i]: {"weight": w.squeeze(0)}
                for i, w in enumerate(th.split(weight, 1, dim=0))
            }

        else:
            wdict = {}

        # For a block (message flow graph), the destination nodes are the
        # first rows of the source node features.
        if g.is_block:
            inputs_src = inputs
            inputs_dst = {
                k: v[: g.number_of_dst_nodes(k)] for k, v in inputs.items()
            }
        else:
            inputs_src = inputs_dst = inputs

        hs = self.conv(g, inputs_src, mod_kwargs=wdict)

        def _apply(ntype, h):
            if self.self_loop:
                h = h + th.matmul(inputs_dst[ntype], self.loop_weight)
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)

        for ntype, tensor in hs.items():
            print(f"Output {ntype} feature shape: {tensor.shape}")

        return {ntype: _apply(ntype, h) for ntype, h in hs.items()}


class RelGraphEmbed(nn.Module):
    r"""Embedding layer for featureless heterograph."""

    def __init__(
        self, g, embed_size, embed_name="embed", activation=None, dropout=0.0
    ):
        super(RelGraphEmbed, self).__init__()
        self.g = g
        self.embed_size = embed_size
        self.embed_name = embed_name
        self.activation = activation
        self.dropout = nn.Dropout(dropout)

        # create weight embeddings for each node for each relation
        self.embeds = nn.ParameterDict()
        for ntype in g.ntypes:
            embed = nn.Parameter(th.Tensor(g.num_nodes(ntype), self.embed_size))
            nn.init.xavier_uniform_(embed, gain=nn.init.calculate_gain("relu"))
            self.embeds[ntype] = embed

    def forward(self, block=None):
        """Forward computation

        Parameters
        ----------
        block : DGLGraph, optional
            Unused; kept for API compatibility with the original example.

        Returns
        -------
        dict[str, torch.Tensor]
            The learnable embedding for each node type.
        """
        return self.embeds


class EntityClassify(nn.Module):
    def __init__(
        self,
        g,
        h_dim,
        out_dim,
        mlp_out,
        num_bases,
        num_hidden_layers=1,
        dropout=0,
        use_self_loop=False,
    ):
        super(EntityClassify, self).__init__()
        self.g = g
        self.h_dim = h_dim
        self.out_dim = out_dim
        self.mlp_out = mlp_out
        self.rel_names = list(set(g.etypes))
        self.rel_names.sort()
        if num_bases < 0 or num_bases > len(self.rel_names):
            self.num_bases = len(self.rel_names)
        else:
            self.num_bases = num_bases
        self.num_hidden_layers = num_hidden_layers
        self.dropout = dropout
        self.use_self_loop = use_self_loop

        self.embed_layer = GraphFeatureExtractor(self.g, self.mlp_out)
        self.layers = nn.ModuleList()
        # i2h
        self.layers.append(
            RelGraphConvLayer(
                self.mlp_out,
                self.h_dim,
                self.rel_names,
                self.num_bases,
                activation=F.relu,
                self_loop=self.use_self_loop,
                dropout=self.dropout,
                weight=False,
            )
        )
        # h2h
        for i in range(self.num_hidden_layers):
            self.layers.append(
                RelGraphConvLayer(
                    self.h_dim,
                    self.h_dim,
                    self.rel_names,
                    self.num_bases,
                    activation=F.relu,
                    self_loop=self.use_self_loop,
                    dropout=self.dropout,
                )
            )
        # h2o
        self.layers.append(
            RelGraphConvLayer(
                self.h_dim,
                self.out_dim,
                self.rel_names,
                self.num_bases,
                activation=None,
                self_loop=self.use_self_loop,
            )
        )

    def forward(self, h=None, blocks=None):
        if h is None:
            # full graph training
            h = self.embed_layer(self.g)
        if blocks is None:
            # full graph training
            for layer in self.layers:
                h = layer(self.g, h)
        else:
            # minibatch training
            for layer, block in zip(self.layers, blocks):
                h = layer(block, h)
        return h
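
For context, entity_classify.py drives the model roughly like this (a simplified sketch; the sizes are placeholders, and category, train_idx, and labels come from the dataset setup there):

model = EntityClassify(g, h_dim=64, out_dim=num_classes, mlp_out=128, num_bases=-1)
logits = model()[category]  # h=None, blocks=None -> full-graph forward path
loss = F.cross_entropy(logits[train_idx], labels[train_idx])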

The error message is:

Traceback (most recent call last):
  File "/Users/zzhao/Downloads/rgcn-hetero/entity_classify.py", line 189, in <module>
    main(args)
  File "/Users/zzhao/Downloads/rgcn-hetero/entity_classify.py", line 98, in main
    logits = model()[category]
  File "/Users/zzhao/anaconda3/envs/graphtest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Users/zzhao/Downloads/rgcn-hetero/model.py", line 274, in forward
    h = layer(self.g, h)
  File "/Users/zzhao/anaconda3/envs/graphtest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Users/zzhao/Downloads/rgcn-hetero/model.py", line 140, in forward
    hs = self.conv(g, inputs, mod_kwargs=wdict)
  File "/Users/zzhao/anaconda3/envs/graphtest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Users/zzhao/anaconda3/envs/graphtest/lib/python3.9/site-packages/dgl/nn/pytorch/hetero.py", line 210, in forward
    dstdata = self._get_module((stype, etype, dtype))(
  File "/Users/zzhao/anaconda3/envs/graphtest/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1190, in _call_impl
    return forward_call(*input, **kwargs)
  File "/Users/zzhao/anaconda3/envs/graphtest/lib/python3.9/site-packages/dgl/nn/pytorch/conv/graphconv.py", line 460, in forward
    rst = th.matmul(rst, weight)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (78x128 and 64x64)

Solved… the weight's and the bias's shapes have to match. The first RGCN layer is built with weight=False, and I neglected that: with no per-relation projection, the i2h layer keeps the embed layer's output width, so mlp_out has to equal h_dim. Otherwise the bias of size h_dim and the next layer's (h_dim, h_dim) weight no longer match, which is exactly the 128-vs-64×64 mismatch above.
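
In other words (a sketch of the constraint, with placeholder sizes):

# With weight=False, the i2h layer applies no projection of its own: its
# output keeps the embed layer's width (mlp_out), while its bias has width
# h_dim. The two must therefore be equal:
model = EntityClassify(g, h_dim=64, out_dim=num_classes,
                       mlp_out=64,  # must equal h_dim because i2h has weight=False
                       num_bases=-1)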

By the way, does anyone know why the first layer of the RGCN performs better with weight=False than with weight=True?
