You could simply create two input heads: one for the embedding, which goes through its own neural network, and one for the two features. The outputs of both networks are then concatenated and passed into a final layer.
Since one of the input heads receives only the two features (presumably a vector of size two), a single linear layer is enough for that branch.
You can combine the two network modules like this:
import torch
import torch.nn as nn
import torch.nn.functional as F

# create a separate network for your embedding input
class EmbeddingModel(nn.Module):
    def __init__(self):
        super(EmbeddingModel, self).__init__()
        self.layer1 = nn.Linear(...)  # fill in your own layer sizes
        ...
        self.layerN = nn.Linear(...)

    def forward(self, x):
        # F.activation stands for the activation of your choice, e.g. F.relu
        x = F.activation(self.layer1(x))
        ...
        x = F.activation(self.layerN(x))
        return x
# create a one-layer network for your "two important features"
# use the same activation function as the last layer of the EmbeddingModel
class FeaturesModel(nn.Module):
    def __init__(self):
        super(FeaturesModel, self).__init__()
        self.layer1 = nn.Linear(...)

    def forward(self, x):
        x = F.activation(self.layer1(x))
        return x
# finally create your main model, which combines both
class MainModel(nn.Module):
    def __init__(self):
        super(MainModel, self).__init__()
        self.embeddingModel = EmbeddingModel()
        self.featuresModel = FeaturesModel()
        # the input dim of this layer has to be the output dim of the
        # embeddingModel plus the output dim of the featuresModel
        self.outputLayer = nn.Linear(...)
    def forward(self, x_embeddings, x_features):
        x_embeddings = self.embeddingModel(x_embeddings)
        x_features = self.featuresModel(x_features)
        # concatenate both branch outputs along the feature dimension
        x = torch.cat((x_embeddings, x_features), -1)
        x = F.activation(self.outputLayer(x))
        return x
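If it helps, here is a minimal runnable sketch of the same idea with made-up sizes. The embedding input of 128, hidden sizes 64/32, the 8-dim feature branch, ReLU activations, and the single sigmoid output are all just assumptions, pick whatever fits your data:

import torch
import torch.nn as nn
import torch.nn.functional as F

class EmbeddingModel(nn.Module):
    def __init__(self):
        super(EmbeddingModel, self).__init__()
        self.layer1 = nn.Linear(128, 64)  # assumed embedding size of 128
        self.layer2 = nn.Linear(64, 32)

    def forward(self, x):
        x = F.relu(self.layer1(x))
        return F.relu(self.layer2(x))

class FeaturesModel(nn.Module):
    def __init__(self):
        super(FeaturesModel, self).__init__()
        self.layer1 = nn.Linear(2, 8)  # the two features go in here

    def forward(self, x):
        return F.relu(self.layer1(x))

class MainModel(nn.Module):
    def __init__(self):
        super(MainModel, self).__init__()
        self.embeddingModel = EmbeddingModel()
        self.featuresModel = FeaturesModel()
        self.outputLayer = nn.Linear(32 + 8, 1)  # 32 from embeddings + 8 from features

    def forward(self, x_embeddings, x_features):
        x_embeddings = self.embeddingModel(x_embeddings)
        x_features = self.featuresModel(x_features)
        x = torch.cat((x_embeddings, x_features), -1)
        return torch.sigmoid(self.outputLayer(x))  # assuming a binary target

model = MainModel()
out = model(torch.randn(4, 128), torch.randn(4, 2))
print(out.shape)  # torch.Size([4, 1])

The key point is the concatenation: as long as the output dims of the two branches add up to the input dim of outputLayer, you can make each branch as deep or shallow as you like.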