// pattern.go — search-pattern construction, parsing, and matching for fzf.
package fzf

import (
	"regexp"
	"sort"
	"strings"

	"github.com/junegunn/fzf/src/algo"
)
// uppercaseLetters is used for smart-case detection.
// NOTE(review): ASCII-only — non-ASCII uppercase will not trigger
// case-sensitive matching; confirm this is intentional.
const uppercaseLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

// Extended search syntax:
//
//	fuzzy               fuzzy match
//	'exact              exact match
//	^exact-prefix       prefix-exact match
//	exact-suffix$       suffix-exact match
//	!not-fuzzy          inverse fuzzy match
//	!'not-exact         inverse exact match
//	!^not-exact-prefix  inverse prefix-exact match
//	!not-exact-suffix$  inverse suffix-exact match

// termType distinguishes how a single search term is matched.
type termType int

const (
	termFuzzy termType = iota
	termExact
	termPrefix
	termSuffix
)

// term is one unit of an extended-mode query.
type term struct {
	typ      termType
	inv      bool   // true for "!"-prefixed (inverse) terms
	text     []rune // term text with syntax characters stripped
	origText []rune // original token, used for cache keys
}
// Pattern represents search pattern
2015-01-01 14:49:30 -05:00
type Pattern struct {
mode Mode
caseSensitive bool
text []rune
2015-01-11 13:01:24 -05:00
terms []term
2015-01-01 14:49:30 -05:00
hasInvTerm bool
delimiter *regexp.Regexp
nth []Range
2015-01-11 13:01:24 -05:00
procFun map[termType]func(bool, *string, []rune) (int, int)
2015-01-01 14:49:30 -05:00
}
var (
	_patternCache map[string]*Pattern
	_splitRegex   *regexp.Regexp
	_cache        ChunkCache
)

func init() {
	// A pattern is uniquely identified by its query string because
	// mode and caseMode are fixed for the lifetime of the program.
	_patternCache = make(map[string]*Pattern)
	_splitRegex = regexp.MustCompile(`\s+`)
	_cache = NewChunkCache()
}

// clearPatternCache drops all memoized Pattern objects.
func clearPatternCache() {
	_patternCache = make(map[string]*Pattern)
}
2015-01-11 13:01:24 -05:00
// BuildPattern builds Pattern object from the given arguments
2015-01-01 14:49:30 -05:00
func BuildPattern(mode Mode, caseMode Case,
nth []Range, delimiter *regexp.Regexp, runes []rune) *Pattern {
var asString string
switch mode {
2015-01-11 13:01:24 -05:00
case ModeExtended, ModeExtendedExact:
2015-01-01 14:49:30 -05:00
asString = strings.Trim(string(runes), " ")
default:
asString = string(runes)
}
cached, found := _patternCache[asString]
if found {
return cached
}
caseSensitive, hasInvTerm := true, false
2015-01-11 13:01:24 -05:00
terms := []term{}
2015-01-01 14:49:30 -05:00
switch caseMode {
2015-01-11 13:01:24 -05:00
case CaseSmart:
if !strings.ContainsAny(asString, uppercaseLetters) {
2015-01-01 14:49:30 -05:00
runes, caseSensitive = []rune(strings.ToLower(asString)), false
}
2015-01-11 13:01:24 -05:00
case CaseIgnore:
2015-01-01 14:49:30 -05:00
runes, caseSensitive = []rune(strings.ToLower(asString)), false
}
switch mode {
2015-01-11 13:01:24 -05:00
case ModeExtended, ModeExtendedExact:
2015-01-01 14:49:30 -05:00
terms = parseTerms(mode, string(runes))
for _, term := range terms {
if term.inv {
hasInvTerm = true
}
}
}
ptr := &Pattern{
mode: mode,
caseSensitive: caseSensitive,
text: runes,
terms: terms,
hasInvTerm: hasInvTerm,
nth: nth,
delimiter: delimiter,
2015-01-11 13:01:24 -05:00
procFun: make(map[termType]func(bool, *string, []rune) (int, int))}
2015-01-01 14:49:30 -05:00
2015-01-11 22:56:17 -05:00
ptr.procFun[termFuzzy] = algo.FuzzyMatch
ptr.procFun[termExact] = algo.ExactMatchNaive
ptr.procFun[termPrefix] = algo.PrefixMatch
ptr.procFun[termSuffix] = algo.SuffixMatch
2015-01-01 14:49:30 -05:00
_patternCache[asString] = ptr
return ptr
}
2015-01-11 13:01:24 -05:00
func parseTerms(mode Mode, str string) []term {
2015-01-01 14:49:30 -05:00
tokens := _splitRegex.Split(str, -1)
2015-01-11 13:01:24 -05:00
terms := []term{}
2015-01-01 14:49:30 -05:00
for _, token := range tokens {
2015-01-11 13:01:24 -05:00
typ, inv, text := termFuzzy, false, token
2015-01-01 14:49:30 -05:00
origText := []rune(text)
2015-01-11 13:01:24 -05:00
if mode == ModeExtendedExact {
typ = termExact
2015-01-01 14:49:30 -05:00
}
if strings.HasPrefix(text, "!") {
inv = true
text = text[1:]
}
if strings.HasPrefix(text, "'") {
2015-01-11 13:01:24 -05:00
if mode == ModeExtended {
typ = termExact
2015-01-01 14:49:30 -05:00
text = text[1:]
}
} else if strings.HasPrefix(text, "^") {
2015-01-11 13:01:24 -05:00
typ = termPrefix
2015-01-01 14:49:30 -05:00
text = text[1:]
} else if strings.HasSuffix(text, "$") {
2015-01-11 13:01:24 -05:00
typ = termSuffix
2015-01-01 14:49:30 -05:00
text = text[:len(text)-1]
}
if len(text) > 0 {
2015-01-11 13:01:24 -05:00
terms = append(terms, term{
2015-01-01 14:49:30 -05:00
typ: typ,
inv: inv,
text: []rune(text),
origText: origText})
}
}
return terms
}
2015-01-11 13:01:24 -05:00
// IsEmpty returns true if the pattern is effectively empty
2015-01-01 14:49:30 -05:00
func (p *Pattern) IsEmpty() bool {
2015-01-11 13:01:24 -05:00
if p.mode == ModeFuzzy {
2015-01-01 14:49:30 -05:00
return len(p.text) == 0
}
2015-01-11 13:01:24 -05:00
return len(p.terms) == 0
2015-01-01 14:49:30 -05:00
}
2015-01-11 13:01:24 -05:00
// AsString returns the search query in string type
2015-01-01 14:49:30 -05:00
func (p *Pattern) AsString() string {
return string(p.text)
}
2015-01-11 13:01:24 -05:00
// CacheKey is used to build string to be used as the key of result cache
2015-01-01 14:49:30 -05:00
func (p *Pattern) CacheKey() string {
2015-01-11 13:01:24 -05:00
if p.mode == ModeFuzzy {
2015-01-01 14:49:30 -05:00
return p.AsString()
}
cacheableTerms := []string{}
for _, term := range p.terms {
if term.inv {
continue
}
cacheableTerms = append(cacheableTerms, string(term.origText))
}
return strings.Join(cacheableTerms, " ")
}
2015-01-11 13:01:24 -05:00
// Match returns the list of matches Items in the given Chunk
2015-01-01 14:49:30 -05:00
func (p *Pattern) Match(chunk *Chunk) []*Item {
space := chunk
// ChunkCache: Exact match
cacheKey := p.CacheKey()
if !p.hasInvTerm { // Because we're excluding Inv-term from cache key
if cached, found := _cache.Find(chunk, cacheKey); found {
return cached
}
}
2015-01-10 13:53:07 -05:00
// ChunkCache: Prefix/suffix match
Loop:
for idx := 1; idx < len(cacheKey); idx++ {
// [---------| ] | [ |---------]
// [--------| ] | [ |--------]
// [-------| ] | [ |-------]
prefix := cacheKey[:len(cacheKey)-idx]
suffix := cacheKey[idx:]
for _, substr := range [2]*string{&prefix, &suffix} {
if cached, found := _cache.Find(chunk, *substr); found {
2015-01-01 14:49:30 -05:00
cachedChunk := Chunk(cached)
space = &cachedChunk
2015-01-10 13:53:07 -05:00
break Loop
2015-01-01 14:49:30 -05:00
}
}
}
matches := p.matchChunk(space)
2015-01-01 14:49:30 -05:00
if !p.hasInvTerm {
_cache.Add(chunk, cacheKey, matches)
}
return matches
}
// matchChunk scans a chunk and returns copies of the matching items
// annotated with their match offsets.
func (p *Pattern) matchChunk(chunk *Chunk) []*Item {
	matches := []*Item{}
	if p.mode == ModeFuzzy {
		for _, item := range *chunk {
			sidx, eidx := p.fuzzyMatch(item)
			if sidx < 0 {
				continue
			}
			matches = append(matches,
				dupItem(item, []Offset{Offset{int32(sidx), int32(eidx)}}))
		}
		return matches
	}
	for _, item := range *chunk {
		// An item matches only when every term produced an offset.
		if offsets := p.extendedMatch(item); len(offsets) == len(p.terms) {
			matches = append(matches, dupItem(item, offsets))
		}
	}
	return matches
}
// MatchItem returns true if the single Item matches the pattern.
func (p *Pattern) MatchItem(item *Item) bool {
	if p.mode == ModeFuzzy {
		sidx, _ := p.fuzzyMatch(item)
		return sidx >= 0
	}
	return len(p.extendedMatch(item)) == len(p.terms)
}
// dupItem makes a copy of the item carrying the given offsets (sorted
// in place) and a reset rank, leaving the original item untouched.
func dupItem(item *Item, offsets []Offset) *Item {
	sort.Sort(ByOrder(offsets))
	dup := Item{
		text:        item.text,
		origText:    item.origText,
		transformed: item.transformed,
		index:       item.index,
		offsets:     offsets,
		rank:        Rank{0, 0, item.index}}
	return &dup
}
func (p *Pattern) fuzzyMatch(item *Item) (int, int) {
input := p.prepareInput(item)
return p.iter(algo.FuzzyMatch, input, p.text)
2015-01-01 14:49:30 -05:00
}
func (p *Pattern) extendedMatch(item *Item) []Offset {
input := p.prepareInput(item)
offsets := []Offset{}
for _, term := range p.terms {
pfun := p.procFun[term.typ]
if sidx, eidx := p.iter(pfun, input, term.text); sidx >= 0 {
if term.inv {
break
2015-01-01 14:49:30 -05:00
}
offsets = append(offsets, Offset{int32(sidx), int32(eidx)})
} else if term.inv {
offsets = append(offsets, Offset{0, 0})
2015-01-01 14:49:30 -05:00
}
}
return offsets
2015-01-01 14:49:30 -05:00
}
// prepareInput returns the item's transformed representation, computing
// and caching it on the item on first use.
func (p *Pattern) prepareInput(item *Item) *Transformed {
	if item.transformed != nil {
		return item.transformed
	}
	var trans *Transformed
	if len(p.nth) > 0 {
		// Restrict matching to the requested token ranges.
		tokens := Tokenize(item.text, p.delimiter)
		trans = Transform(tokens, p.nth)
	} else {
		trans = &Transformed{
			whole: item.text,
			parts: []Token{Token{text: item.text, prefixLength: 0}}}
	}
	item.transformed = trans
	return trans
}
// iter applies the match function to each part of the transformed input
// and returns the first hit translated to whole-line offsets, or
// (-1, -1) when no part matches.
func (p *Pattern) iter(pfun func(bool, *string, []rune) (int, int),
	inputs *Transformed, pattern []rune) (int, int) {
	for _, part := range inputs.parts {
		if sidx, eidx := pfun(p.caseSensitive, part.text, pattern); sidx >= 0 {
			return sidx + part.prefixLength, eidx + part.prefixLength
		}
	}
	return -1, -1
}