From 9e99c6151fef3651804bb3c28a8302b2dbb8cdaa Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Fri, 3 Jan 2025 19:43:05 +0000
Subject: [PATCH] [DOC] Tokenizer - Letter (#8498)

* adding page letter tokenizer

Signed-off-by: leanne.laceybyrne@eliatra.com

* Doc review

Signed-off-by: Fanit Kolchina

* Apply suggestions from code review

Co-authored-by: Nathan Bower
Signed-off-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>

---------

Signed-off-by: leanne.laceybyrne@eliatra.com
Signed-off-by: Fanit Kolchina
Signed-off-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
Co-authored-by: Fanit Kolchina
Co-authored-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
Co-authored-by: Nathan Bower
(cherry picked from commit a3af660ca885e09441453979a1d06867464af7c2)
Signed-off-by: github-actions[bot]
---
 _analyzers/tokenizers/letter.md | 97 +++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)
 create mode 100644 _analyzers/tokenizers/letter.md

diff --git a/_analyzers/tokenizers/letter.md b/_analyzers/tokenizers/letter.md
new file mode 100644
index 0000000000..ba67a7841d
--- /dev/null
+++ b/_analyzers/tokenizers/letter.md
@@ -0,0 +1,97 @@
---
layout: default
title: Letter
parent: Tokenizers
nav_order: 60
---

# Letter tokenizer

The `letter` tokenizer splits text into words on any non-letter character. It works well for many European languages but is ineffective for some Asian languages, in which words aren't separated by spaces.

## Example usage

The following example request creates a new index named `my_index` and configures an analyzer with a `letter` tokenizer:

```json
PUT /my_index
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_letter_analyzer": {
          "type": "custom",
          "tokenizer": "letter"
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "content": {
        "type": "text",
        "analyzer": "my_letter_analyzer"
      }
    }
  }
}
```
{% include copy-curl.html %}

## Generated tokens

Use the following request to examine the tokens generated by the `letter` tokenizer:

```json
POST _analyze
{
  "tokenizer": "letter",
  "text": "Cats 4EVER love chasing butterflies!"
}
```
{% include copy-curl.html %}

The response contains the generated tokens:

```json
{
  "tokens": [
    {
      "token": "Cats",
      "start_offset": 0,
      "end_offset": 4,
      "type": "word",
      "position": 0
    },
    {
      "token": "EVER",
      "start_offset": 6,
      "end_offset": 10,
      "type": "word",
      "position": 1
    },
    {
      "token": "love",
      "start_offset": 11,
      "end_offset": 15,
      "type": "word",
      "position": 2
    },
    {
      "token": "chasing",
      "start_offset": 16,
      "end_offset": 23,
      "type": "word",
      "position": 3
    },
    {
      "token": "butterflies",
      "start_offset": 24,
      "end_offset": 35,
      "type": "word",
      "position": 4
    }
  ]
}
```

Because the digit `4` is a non-letter character, `4EVER` is split and only `EVER` is returned as a token. The trailing `!` is discarded for the same reason.
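
## Testing the custom analyzer

Apostrophes are also non-letter characters, so the `letter` tokenizer splits contractions. As a minimal check (an illustrative sketch, not part of the original page), the following request runs the `my_letter_analyzer` analyzer created in the earlier example against text containing an apostrophe; based on the splitting rule described above, it should return the tokens `Don`, `t`, and `stop`:

```json
GET /my_index/_analyze
{
  "analyzer": "my_letter_analyzer",
  "text": "Don't stop"
}
```
{% include copy-curl.html %}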