I have a problem with an Elasticsearch mapping. For example, the mapping for the field name is:
{
  "name": {
    "type": "keyword",
    "fields": {
      "ngram": {
        "type": "text",
        "analyzer": "ngram_analyzer",
        "search_analyzer": "ngram_analyzer"
      },
      "word": {
        "type": "text",
        "analyzer": "word_analyzer",
        "search_analyzer": "word_analyzer"
      }
    }
  }
}
The whole mapping works except for search_analyzer, which Elasticsearch seems to ignore.
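For example, a match query against the ngram sub-field, roughly like the sketch below (the index name my_index and the query text are placeholders), does not appear to go through ngram_analyzer at search time:

# my_index and the query text are placeholders
GET /my_index/_search
{
  "query": {
    "match": {
      "name.ngram": "some input"
    }
  }
}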
These are my analysis settings:
{
  "analysis": {
    "analyzer": {
      "ngram_analyzer": {
        "type": "custom",
        "char_filter": [
          "number_char_filter_map",
          "remove_duplicates"
        ],
        "tokenizer": "ngram_tokenizer_whitespace",
        "filter": [
          "lowercase",
          "english_stop"
        ]
      },
      "word_analyzer": {
        "type": "custom",
        "char_filter": [
          "number_char_filter_map",
          "remove_duplicates"
        ],
        "tokenizer": "word_tokenizer",
        "filter": [
          "lowercase",
          "english_stop"
        ]
      }
    },
    "char_filter": {
      "remove_duplicates": {
        "type": "pattern_replace",
        "pattern": "(.)(?=\\1)",
        "replacement": ""
      },
      "remove_white_spaces": {
        "type": "pattern_replace",
        "pattern": "(\\s)",
        "replacement": ""
      }
    },
    "filter": {
      "english_stop": {
        "type": "stop",
        "ignore_case": true,
        "stopwords": "_english_"
      }
    },
    "tokenizer": {
      "ngram_tokenizer": {
        "type": "ngram",
        "min_gram": 2,
        "max_gram": 7
      },
      "ngram_tokenizer_whitespace": {
        "type": "ngram",
        "min_gram": 2,
        "max_gram": 7,
        "token_chars": [
          "letter",
          "digit",
          "punctuation",
          "symbol"
        ]
      },
      "word_tokenizer": {
        "type": "standard"
      }
    }
  }
}
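For what it's worth, the analyzers themselves can be tested in isolation with the _analyze API, along these lines (my_index and the sample text are placeholders):

# my_index and the sample text are placeholders
GET /my_index/_analyze
{
  "analyzer": "ngram_analyzer",
  "text": "sample text"
}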
Looking through the Elasticsearch documentation, I could not find search_analyzer documented as an option inside fields (multi-fields). If this approach does not work, is there an alternative structure for specifying a search analyzer?
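The only alternative I am aware of is overriding the analyzer per query instead of in the mapping, roughly like this sketch (again, my_index and the query text are placeholders), but I would prefer to declare it in the mapping if possible:

# my_index and the query text are placeholders
GET /my_index/_search
{
  "query": {
    "match": {
      "name.ngram": {
        "query": "some input",
        "analyzer": "ngram_analyzer"
      }
    }
  }
}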