# Source code for easytexminer.model_zoo.models.cnn.modeling_cnn

# coding=utf-8
# Copyright (c) 2020 Alibaba PAI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import, division, print_function, unicode_literals

import torch
from torch import nn

class TextCNNEncoder(nn.Module):
    r"""TextCNN encoder for text classification.

    Runs several parallel two-layer 1-D convolution stacks (one per kernel
    size) over the token embeddings, max-pools each stack down to a single
    feature vector, concatenates the pooled vectors, and projects them
    through a fully connected layer.

    Args:
        config (:obj: TextCNNConfig): The configuration of the TextCNN encoder.

    Examples::

        >>> from easytexminer.model_zoo.models.cnn import TextCNNConfig, TextCNNEncoder
        >>> # Initializing a cnn configuration
        >>> configuration = TextCNNConfig()
        >>> # Initializing a model from the cnn-en style configuration
        >>> model = TextCNNEncoder(configuration)
    """

    def __init__(self, config):
        super(TextCNNEncoder, self).__init__()
        self.model_name = 'text_classify_cnn'
        # Token-id -> dense-vector lookup table.
        self.embedding = nn.Embedding(config.vocab_size, config.embed_size)

        embed_dim = config.embed_size
        n_filters = config.conv_dim
        seq_len = config.sequence_length
        widths = [int(w) for w in config.kernel_sizes.split(',')]
        hidden_dim = config.linear_hidden_size

        # One conv stack per kernel width. Each unpadded Conv1d shrinks the
        # sequence length by (width - 1), so after two convs the length is
        # seq_len - 2 * width + 2; max-pooling with exactly that kernel size
        # collapses each branch to a single position.
        self.cnn_encoder = nn.ModuleList()
        for width in widths:
            stack = nn.Sequential(
                nn.Conv1d(in_channels=embed_dim, out_channels=n_filters,
                          kernel_size=width),
                nn.BatchNorm1d(n_filters),
                nn.ReLU(inplace=True),
                nn.Conv1d(in_channels=n_filters, out_channels=n_filters,
                          kernel_size=width),
                nn.BatchNorm1d(n_filters),
                nn.ReLU(inplace=True),
                nn.MaxPool1d(kernel_size=(seq_len - width * 2 + 2)),
            )
            self.cnn_encoder.append(stack)

        # Project the concatenated pooled features to the hidden size.
        self.fc_layers = nn.Sequential(
            nn.Linear(len(widths) * n_filters, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
        )

    def forward(self, fact_inputs):
        """Encode a batch of token-id sequences.

        Args:
            fact_inputs: LongTensor of token ids, shape (batch, seq_len).

        Returns:
            FloatTensor of shape (batch, linear_hidden_size).
        """
        # (batch, seq_len, embed) -> (batch, embed, seq_len) for Conv1d.
        embedded = self.embedding(fact_inputs).permute(0, 2, 1)
        pooled = [stack(embedded) for stack in self.cnn_encoder]
        features = torch.cat(pooled, dim=1)
        flat = features.view(features.size(0), -1)
        return self.fc_layers(flat)