Fix Transform result cache to speed up subsequent searches

Junegunn Choi 2015-01-11 01:47:46 +09:00
parent 4f40314433
commit ca4bdfb4bd
3 changed files with 23 additions and 15 deletions
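What the fix addresses: Item carries a transformed field that caches the Tokenize/Transform result used when matching against specific fields, but fuzzyMatch and extendedMatch built fresh Items for their results without copying that field, so every subsequent (narrowing) search recomputed the transforms. A minimal sketch of the lookup-or-compute pattern involved follows; prepareInput is called in the diff below, but this body and the p.nth field are illustrative assumptions, not the verbatim upstream code:

// Illustrative sketch only, not the actual upstream function body.
func (p *Pattern) prepareInput(item *Item) *Transformed {
    if item.transformed != nil {
        // Cache hit: a previous search already tokenized and transformed
        // this item, so reuse the result.
        return item.transformed
    }
    tokens := Tokenize(item.text, nil) // nil delimiter, as in the test below
    trans := Transform(tokens, p.nth)  // p.nth: assumed []Range field on Pattern
    item.transformed = trans           // remember it for subsequent searches
    return trans
}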

View File

@@ -10,9 +10,9 @@ type Offset [2]int32
 type Item struct {
     text        *string
     origText    *string
+    transformed *Transformed
     offsets     []Offset
     rank        Rank
-    transformed *Transformed
 }

 type Rank struct {

View File

@@ -229,16 +229,22 @@ func (p *Pattern) Match(chunk *Chunk) []*Item {
     return matches
 }

+func dupItem(item *Item, offsets []Offset) *Item {
+    return &Item{
+        text:        item.text,
+        origText:    item.origText,
+        transformed: item.transformed,
+        offsets:     offsets,
+        rank:        Rank{0, 0, item.rank.index}}
+}
+
 func (p *Pattern) fuzzyMatch(chunk *Chunk) []*Item {
     matches := []*Item{}
     for _, item := range *chunk {
         input := p.prepareInput(item)
         if sidx, eidx := p.iter(FuzzyMatch, input, p.text); sidx >= 0 {
-            matches = append(matches, &Item{
-                text:     item.text,
-                origText: item.origText,
-                offsets:  []Offset{Offset{int32(sidx), int32(eidx)}},
-                rank:     Rank{0, 0, item.rank.index}})
+            matches = append(matches,
+                dupItem(item, []Offset{Offset{int32(sidx), int32(eidx)}}))
         }
     }
     return matches
@@ -262,11 +268,7 @@ func (p *Pattern) extendedMatch(chunk *Chunk) []*Item {
             }
         }
         if len(offsets) == len(p.terms) {
-            matches = append(matches, &Item{
-                text:     item.text,
-                origText: item.origText,
-                offsets:  offsets,
-                rank:     Rank{0, 0, item.rank.index}})
+            matches = append(matches, dupItem(item, offsets))
         }
     }
     return matches
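Both match paths now funnel through dupItem, which copies the cached transformed pointer into the result Item and resets the first two rank components while keeping the original index. Items returned by one search therefore keep their cache when they are matched again with a longer query. A rough usage sketch; it assumes Chunk is a slice of *Item (as the test below suggests) and longerPattern is a hypothetical second, more specific pattern:

// Hypothetical illustration of the speed-up this commit is after.
first := pattern.fuzzyMatch(&chunk)           // computes and caches transforms
narrowed := Chunk(first)                      // assumes: type Chunk []*Item
second := longerPattern.fuzzyMatch(&narrowed) // reuses item.transformed, no re-tokenizing
_ = second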

View File

@@ -86,19 +86,25 @@ func TestCaseSensitivity(t *testing.T) {
     }
 }

-func TestOrigText(t *testing.T) {
+func TestOrigTextAndTransformed(t *testing.T) {
     strptr := func(str string) *string {
         return &str
     }
     pattern := BuildPattern(MODE_EXTENDED, CASE_SMART, []Range{}, nil, []rune("jg"))
+    tokens := Tokenize(strptr("junegunn"), nil)
+    trans := Transform(tokens, []Range{Range{1, 1}})
     for _, fun := range []func(*Chunk) []*Item{pattern.fuzzyMatch, pattern.extendedMatch} {
         chunk := Chunk{
-            &Item{text: strptr("junegunn"), origText: strptr("junegunn.choi")},
+            &Item{
+                text:        strptr("junegunn"),
+                origText:    strptr("junegunn.choi"),
+                transformed: trans},
         }
         matches := fun(&chunk)
-        if *matches[0].text != "junegunn" || *matches[0].origText != "junegunn.choi" ||
-            matches[0].offsets[0][0] != 0 || matches[0].offsets[0][1] != 5 {
+        if *matches[0].text != "junegunn" || *matches[0].origText != "junegunn.choi" ||
+            matches[0].offsets[0][0] != 0 || matches[0].offsets[0][1] != 5 ||
+            matches[0].transformed != trans {
             t.Error("Invalid match result", matches)
         }
     }
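The renamed test asserts pointer identity: the Item coming out of either match function must still reference the Transformed value attached to the input. A tiny complementary assertion, not part of this commit, spelling out what dupItem guarantees:

// Purely illustrative: dupItem (added in the diff above) must carry the
// cached pointer over to the copy it returns.
orig := &Item{text: strptr("junegunn"), transformed: trans}
dup := dupItem(orig, nil)
if dup.transformed != trans {
    t.Error("dupItem dropped the cached Transform result")
}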