-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathtokenize.go
More file actions
44 lines (36 loc) · 1.21 KB
/
tokenize.go
File metadata and controls
44 lines (36 loc) · 1.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
package textrank
import (
"regexp"
"strings"
"github.com/neurosnap/sentences/english"
)
var (
	// tokenizeWordsReplacePunctRe matches punctuation that acts as a word
	// separator; it is replaced with a space when tokenizing text into words.
	tokenizeWordsReplacePunctRe = regexp.MustCompile(`[.,\/!&;:=\-_]`)

	// tokenizeWordsRemovePunctRe matches punctuation that carries no word
	// boundary; it is stripped outright when tokenizing text into words.
	tokenizeWordsRemovePunctRe = regexp.MustCompile(`[#$%\^\*{}~()\?\'\"]`)
)
// sentenceTokenizer is built once at package init: constructing the
// tokenizer loads the English training data, which is far too expensive to
// repeat on every tokenizeSentences call. The error is deliberately
// discarded, matching the previous per-call behavior; english.NewSentenceTokenizer
// with nil training falls back to the bundled defaults — NOTE(review):
// confirm this cannot fail in practice.
var sentenceTokenizer, _ = english.NewSentenceTokenizer(nil)

// tokenizeSentences tokenizes the text into sentences. Each sentence is
// trimmed of surrounding whitespace and of one trailing period; sentences
// that are empty after trimming are dropped.
func tokenizeSentences(text string) []string {
	sentences := []string{}
	for _, token := range sentenceTokenizer.Tokenize(text) {
		sentence := strings.TrimSpace(token.Text)
		if sentence != "" {
			sentences = append(sentences, strings.TrimSuffix(sentence, "."))
		}
	}
	return sentences
}
// tokenizeWords tokenizes the text into lowercase words.
//
// Punctuation matched by tokenizeWordsReplacePunctRe is treated as a word
// separator (replaced with a space), while punctuation matched by
// tokenizeWordsRemovePunctRe is stripped outright. Words are then split on
// any run of whitespace — not only literal spaces — so tab- and
// newline-separated words are tokenized correctly too.
func tokenizeWords(text string) []string {
	// Lowercase once up front; both regexes match only punctuation, so
	// lowering before the replacements cannot change what they match.
	// (The previous version called ToLower twice — the second call was a
	// no-op on already-lowercased text.)
	text = strings.ToLower(text)
	text = tokenizeWordsReplacePunctRe.ReplaceAllString(text, " ")
	text = tokenizeWordsRemovePunctRe.ReplaceAllString(text, "")
	// strings.Fields splits on all Unicode whitespace and drops empty
	// fields, replacing the manual Split + non-empty filter loop.
	return strings.Fields(text)
}