Thanks for making this awesome graph learning framework.
I’m trying to reproduce the results using Keras+TensorFlow, based on Kipf’s Keras implementation of GCN.
I tried to construct a model similar to your gcn_concat, with concatenation and 10 stacked GCN layers, as described in the README.
However, I can never match the DGL results. On Cora, the test accuracy gets stuck at roughly 80% no matter how many GCN layers I use, even though I’m using the same hyperparameters (lr=0.01, dropout=0.5, 16 hidden features, etc.).
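For reference, the per-layer update I believe gcn_concat computes (this is my reading of the example, so it could be exactly where I go wrong) is

    H^{(l+1)} = ReLU( A_hat · [X ‖ H^{(1)} ‖ … ‖ H^{(l)}] · W^{(l)} )

where A_hat = D^{-1/2} (A + I) D^{-1/2} is the renormalized adjacency with self-loops and ‖ denotes feature-wise concatenation.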
Could anyone be so kind as to take a brief look at my code, just to confirm that my network architecture is correct?
Here is my implementation:
from keras import activations, initializers, regularizers, constraints
from keras import backend as K
from keras.layers import Layer, Input, Dropout, Dense, Concatenate


class GraphConvolution(Layer):
    """Basic graph convolution layer as in https://arxiv.org/abs/1609.02907"""
    def __init__(self, units, support=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(GraphConvolution, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = True
        self.support = support
        assert support >= 1

    def compute_output_shape(self, input_shapes):
        features_shape = input_shapes[0]
        output_shape = (features_shape[0], self.units)
        return output_shape  # (batch_size, output_dim)

    def build(self, input_shapes):
        features_shape = input_shapes[0]
        assert len(features_shape) == 2
        input_dim = features_shape[1]
        self.kernel = self.add_weight(shape=(input_dim * self.support, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True

    def call(self, inputs, mask=None):
        features = inputs[0]  # node features, shape (N, F)
        basis = inputs[1]     # normalized adjacency, shape (N, N)
        supports = K.dot(basis, features)      # neighborhood aggregation: A_hat @ X
        output = K.dot(supports, self.kernel)  # linear transform: (A_hat @ X) @ W
        # `if self.bias:` is ambiguous for a tensor, so test against None instead.
        if self.bias is not None:
            output += self.bias
        return self.activation(output)
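For reference, A_ below is meant to be the renormalized adjacency from the GCN paper, A_hat = D^{-1/2} (A + I) D^{-1/2}. A minimal sketch of how it can be built, assuming scipy and a binary adjacency matrix A (my actual preprocessing follows kegra, so treat this as illustrative):

import numpy as np
import scipy.sparse as sp

def normalize_adj(adj):
    # Renormalization trick from Kipf & Welling:
    # A_hat = D^{-1/2} (A + I) D^{-1/2}
    adj = adj + sp.eye(adj.shape[0])              # add self-loops
    deg = np.asarray(adj.sum(axis=1)).flatten()   # degree of each node
    d_inv_sqrt = np.power(deg, -0.5)
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0        # defensive: zero rows would give inf
    D_inv_sqrt = sp.diags(d_inv_sqrt)
    return (D_inv_sqrt @ adj @ D_inv_sqrt).tocsr()

A_ = normalize_adj(A)   # A: scipy sparse adjacency matrix of the graph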
N_FILTERS = 16
support = 1                    # a single basis: the normalized adjacency
F = X.shape[1]                 # input feature dimension (X: feature matrix from preprocessing)
n_classes = y_train.shape[1]   # number of label classes (y_train from preprocessing)

# A_ will be fed into G; it is the normalized adjacency matrix with self-loops.
G = Input(shape=(None, None), batch_shape=(None, None), sparse=True)
# feature input
X_in = Input(shape=(F,))

# Define the model architecture.
# It is meant to mirror
# https://github.com/dmlc/dgl/blob/master/examples/mxnet/gcn/gcn_concat.py
# NOTE: we pass the inputs of each graph convolutional layer as a list of
# tensors. This is somewhat hacky; more elegant options would require
# rewriting the Layer base class.
H = GraphConvolution(N_FILTERS, support, activation='relu')([X_in, G])
H = Dropout(0.5)(H)
concatenate_list = [X_in, H]
if args.nlayers > 1:
    for i in range(args.nlayers - 1):
        # Each layer takes the concatenation of the raw features and all
        # previous layers' outputs (dense skip connections).
        H = Concatenate()(concatenate_list)
        H = GraphConvolution(N_FILTERS, support, activation='relu')([H, G])
        H = Dropout(0.5)(H)
        concatenate_list.append(H)
H = Concatenate()(concatenate_list)
H = Dropout(0.5)(H)
Y = Dense(n_classes, activation='softmax')(H)
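And this is roughly how I compile and train it (a minimal sketch following kegra's semi-supervised setup; X, A_, y_train, and train_mask are assumed to come from preprocessing, with train_mask zeroing out unlabeled nodes):

from keras.models import Model
from keras.optimizers import Adam

model = Model(inputs=[X_in, G], outputs=Y)
# Masked cross-entropy: sample_weight zeroes out unlabeled nodes, and
# weighted_metrics reports accuracy under the same mask.
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01),
              weighted_metrics=['acc'])

# The whole graph is a single batch; shuffling would break node alignment.
model.fit([X, A_], y_train,
          sample_weight=train_mask,
          batch_size=X.shape[0],
          epochs=200,
          shuffle=False)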
Thanks in advance.