0

I got the following error ("AssertionError: Found no field that needed padding; we are surprised you got this error, please open an issue on github").

"AssertionError: Found no field that needed padding; we are surprised you got this error, please open an issue on github"

I don't know why I got this error.

My config file is below.

"""
{
    "dataset_reader": {
        "type": "tbmse_drop",
        "answer_field_generators": {
            "arithmetic_answer": {
                "type": "arithmetic_answer_generator",
                "special_numbers": [
                    100,
                    1
                ]
            },
            "count_answer": {
                "type": "count_answer_generator"
            },
            "passage_span_answer": {
                "type": "span_answer_generator",
                "text_type": "passage"
            },
            "question_span_answer": {
                "type": "span_answer_generator",
                "text_type": "question"
            },
            "tagged_answer": {
                "type": "tagged_answer_generator",
                "ignore_question": false,
                "labels": {
                    "I": 1,
                    "O": 0
                }
            }
        },
        "answer_generator_names_per_type": {
            "date": [
                "arithmetic_answer",
                "passage_span_answer",
                "question_span_answer",
                "tagged_answer"
            ],
            "multiple_span": [
                "tagged_answer"
            ],
            "number": [
                "arithmetic_answer",
                "count_answer",
                "passage_span_answer",
                "question_span_answer",
                "tagged_answer"
            ],
            "single_span": [
                "tagged_answer",
                "passage_span_answer",
                "question_span_answer"
            ]
        },
        "is_training": true,
        "old_reader_behavior": true,
        "pickle": {
            "action": "load",
            "file_name": "all_heads_IO_roberta-large",
            "path": "../pickle/drop"
        },
        "tokenizer": {
            "type": "huggingface_transformers",
            "pretrained_model": "roberta-large"
        }
    },
    "model": {
        "type": "multi_head",
        "dataset_name": "drop",
        "head_predictor": {
            "activations": [
                "relu",
                "linear"
            ],
            "dropout": [
                0.1,
                0
            ],
            "hidden_dims": [
                1024,
                5
            ],
            "input_dim": 2048,
            "num_layers": 2
        },
        "heads": {
            "arithmetic": {
                "type": "arithmetic_head",
                "output_layer": {
                    "activations": [
                        "relu",
                        "linear"
                    ],
                    "dropout": [
                        0.1,
                        0
                    ],
                    "hidden_dims": [
                        1024,
                        3
                    ],
                    "input_dim": 2048,
                    "num_layers": 2
                },
                "special_embedding_dim": 1024,
                "special_numbers": [
                    100,
                    1
                ],
                "training_style": "soft_em"
            },
            "count": {
                "type": "count_head",
                "max_count": 10,
                "output_layer": {
                    "activations": [
                        "relu",
                        "linear"
                    ],
                    "dropout": [
                        0.1,
                        0
                    ],
                    "hidden_dims": [
                        1024,
                        11
                    ],
                    "input_dim": 1024,
                    "num_layers": 2
                }
            },
            "multi_span": {
                "type": "multi_span_head",
                "decoding_style": "at_least_one",
                "ignore_question": false,
                "labels": {
                    "I": 1,
                    "O": 0
                },
                "output_layer": {
                    "activations": [
                        "relu",
                        "linear"
                    ],
                    "dropout": [
                        0.1,
                        0
                    ],
                    "hidden_dims": [
                        1024,
                        2
                    ],
                    "input_dim": 1024,
                    "num_layers": 2
                },
                "prediction_method": "viterbi",
                "training_style": "soft_em"
            },
            "passage_span": {
                "type": "passage_span_head",
                "end_output_layer": {
                    "activations": "linear",
                    "hidden_dims": 1,
                    "input_dim": 1024,
                    "num_layers": 1
                },
                "start_output_layer": {
                    "activations": "linear",
                    "hidden_dims": 1,
                    "input_dim": 1024,
                    "num_layers": 1
                },
                "training_style": "soft_em"
            },
            "question_span": {
                "type": "question_span_head",
                "end_output_layer": {
                    "activations": [
                        "relu",
                        "linear"
                    ],
                    "dropout": [
                        0.1,
                        0
                    ],
                    "hidden_dims": [
                        1024,
                        1
                    ],
                    "input_dim": 2048,
                    "num_layers": 2
                },
                "training_style": "soft_em"
            }
        },
        "passage_summary_vector_module": {
            "activations": "linear",
            "hidden_dims": 1,
            "input_dim": 1024,
            "num_layers": 1
        },
        "pretrained_model": "roberta-large",
        "question_summary_vector_module": {
            "activations": "linear",
            "hidden_dims": 1,
            "input_dim": 1024,
            "num_layers": 1
        }
    },
    "train_data_path": "drop_data/drop_dataset_train.json",
    "validation_data_path": "drop_data/drop_dataset_dev.json",
    "trainer": {
        "num_epochs": 15,
        "optimizer": {
            "type": "adam",
            "lr": 5e-06
        },
        "patience": 10,
        "validation_metric": "+f1"
    },
    "data_loader": {
        "batch_sampler": {
            "type": "bucket",
            "batch_size": 1
        }
    },
    "distributed": {
        "cuda_devices": [
            0,
            1
        ]
    },
    "validation_dataset_reader": {
        "type": "tbmse_drop",
        "answer_field_generators": {
            "arithmetic_answer": {
                "type": "arithmetic_answer_generator",
                "special_numbers": [
                    100,
                    1
                ]
            },
            "count_answer": {
                "type": "count_answer_generator"
            },
            "passage_span_answer": {
                "type": "span_answer_generator",
                "text_type": "passage"
            },
            "question_span_answer": {
                "type": "span_answer_generator",
                "text_type": "question"
            },
            "tagged_answer": {
                "type": "tagged_answer_generator",
                "ignore_question": false,
                "labels": {
                    "I": 1,
                    "O": 0
                }
            }
        },
        "answer_generator_names_per_type": {
            "date": [
                "arithmetic_answer",
                "passage_span_answer",
                "question_span_answer",
                "tagged_answer"
            ],
            "multiple_span": [
                "tagged_answer"
            ],
            "number": [
                "arithmetic_answer",
                "count_answer",
                "passage_span_answer",
                "question_span_answer",
                "tagged_answer"
            ],
            "single_span": [
                "tagged_answer",
                "passage_span_answer",
                "question_span_answer"
            ]
        },
        "is_training": false,
        "old_reader_behavior": true,
        "pickle": {
            "action": "load",
            "file_name": "all_heads_IO_roberta-large",
            "path": "../pickle/drop"
        },
        "tokenizer": {
            "type": "huggingface_transformers",
            "pretrained_model": "roberta-large"
        }
    }
}
"""

I used AllenNLP 2.0.1 and 2.0.2 (the same error occurred in both versions).

Igna
  • 1,078
  • 8
  • 18
jhgwak
  • 1

1 Answer

0

Is it possible that your dataset reader does not produce any instances at all? Alternatively, is it possible that the instances it produces have no fields in them (i.e., are totally empty)? In both of those cases, this error would occur.

If none of these things are happening, try setting sorting_keys for the batch_sampler to whatever the longest field in your instances is.

Dirk Groeneveld
  • 2,547
  • 2
  • 22
  • 23