Rewrite fzf in Go

commit f3177305d5
parent 7ba93d9f83

.gitignore (vendored, 2 lines changed)
@@ -1,3 +1,5 @@
bin
src/fzf/fzf_*
pkg
Gemfile.lock
.DS_Store
install (126 lines changed)
@@ -3,60 +3,81 @@
cd `dirname $BASH_SOURCE`
|
||||
fzf_base=`pwd`
|
||||
|
||||
# ruby executable
|
||||
echo -n "Checking Ruby executable ... "
|
||||
ruby=`which ruby`
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ruby executable not found!"
|
||||
exit 1
|
||||
fi
|
||||
ARCHI=$(uname -sm)
|
||||
|
||||
# System ruby is preferred
|
||||
system_ruby=/usr/bin/ruby
|
||||
if [ -x $system_ruby -a $system_ruby != "$ruby" ]; then
|
||||
$system_ruby --disable-gems -rcurses -e0 2> /dev/null
|
||||
[ $? -eq 0 ] && ruby=$system_ruby
|
||||
fi
|
||||
|
||||
echo "OK ($ruby)"
|
||||
|
||||
# Curses-support
|
||||
echo -n "Checking Curses support ... "
|
||||
"$ruby" -rcurses -e0 2> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "OK"
|
||||
else
|
||||
echo "Not found"
|
||||
echo "Installing 'curses' gem ... "
|
||||
if (( EUID )); then
|
||||
/usr/bin/env gem install curses --user-install
|
||||
download() {
|
||||
echo "Downloading fzf executable ($1) ..."
|
||||
if curl -fLo "$fzf_base"/bin/fzf https://github.com/junegunn/fzf-bin/releases/download/snapshot/$1; then
|
||||
chmod +x "$fzf_base"/bin/fzf
|
||||
else
|
||||
/usr/bin/env gem install curses
|
||||
fi
|
||||
if [ $? -ne 0 ]; then
|
||||
echo
|
||||
echo "Failed to install 'curses' gem."
|
||||
if [[ $(uname -r) =~ 'ARCH' ]]; then
|
||||
echo "Make sure that base-devel package group is installed."
|
||||
fi
|
||||
echo "Failed to download $1"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Ruby version
|
||||
echo -n "Checking Ruby version ... "
|
||||
"$ruby" -e 'exit RUBY_VERSION >= "1.9"'
|
||||
if [ $? -eq 0 ]; then
|
||||
echo ">= 1.9"
|
||||
"$ruby" --disable-gems -rcurses -e0 2> /dev/null
|
||||
mkdir -p "$fzf_base"/bin
|
||||
if [ "$ARCHI" = "Darwin x86_64" ]; then
|
||||
download fzf_darwin_amd64
|
||||
elif [ "$ARCHI" = "Linux x86_64" ]; then
|
||||
download fzf_linux_amd64
|
||||
else # No prebuilt executable
|
||||
echo "No prebuilt binary for $ARCHI ... Installing legacy Ruby version ..."
|
||||
|
||||
# ruby executable
|
||||
echo -n "Checking Ruby executable ... "
|
||||
ruby=`which ruby`
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ruby executable not found!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# System ruby is preferred
|
||||
system_ruby=/usr/bin/ruby
|
||||
if [ -x $system_ruby -a $system_ruby != "$ruby" ]; then
|
||||
$system_ruby --disable-gems -rcurses -e0 2> /dev/null
|
||||
[ $? -eq 0 ] && ruby=$system_ruby
|
||||
fi
|
||||
|
||||
echo "OK ($ruby)"
|
||||
|
||||
# Curses-support
|
||||
echo -n "Checking Curses support ... "
|
||||
"$ruby" -rcurses -e0 2> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
fzf_cmd="$ruby --disable-gems $fzf_base/fzf"
|
||||
echo "OK"
|
||||
else
|
||||
echo "Not found"
|
||||
echo "Installing 'curses' gem ... "
|
||||
if (( EUID )); then
|
||||
/usr/bin/env gem install curses --user-install
|
||||
else
|
||||
/usr/bin/env gem install curses
|
||||
fi
|
||||
if [ $? -ne 0 ]; then
|
||||
echo
|
||||
echo "Failed to install 'curses' gem."
|
||||
if [[ $(uname -r) =~ 'ARCH' ]]; then
|
||||
echo "Make sure that base-devel package group is installed."
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Ruby version
|
||||
echo -n "Checking Ruby version ... "
|
||||
"$ruby" -e 'exit RUBY_VERSION >= "1.9"'
|
||||
if [ $? -eq 0 ]; then
|
||||
echo ">= 1.9"
|
||||
"$ruby" --disable-gems -rcurses -e0 2> /dev/null
|
||||
if [ $? -eq 0 ]; then
|
||||
fzf_cmd="$ruby --disable-gems $fzf_base/fzf"
|
||||
else
|
||||
fzf_cmd="$ruby $fzf_base/fzf"
|
||||
fi
|
||||
else
|
||||
echo "< 1.9"
|
||||
fzf_cmd="$ruby $fzf_base/fzf"
|
||||
fi
|
||||
else
|
||||
echo "< 1.9"
|
||||
fzf_cmd="$ruby $fzf_base/fzf"
|
||||
fi
|
||||
|
||||
# Auto-completion
|
||||
@@ -85,10 +106,17 @@ for shell in bash zsh; do
|
||||
# Setup fzf function
|
||||
# ------------------
|
||||
unalias fzf 2> /dev/null
|
||||
fzf() {
|
||||
$fzf_cmd "\$@"
|
||||
}
|
||||
export -f fzf > /dev/null
|
||||
unset fzf 2> /dev/null
|
||||
if [ -x "$fzf_base/bin/fzf" ]; then
|
||||
if [[ ! "\$PATH" =~ "$fzf_base/bin" ]]; then
|
||||
export PATH="$fzf_base/bin:\$PATH"
|
||||
fi
|
||||
else
|
||||
fzf() {
|
||||
$fzf_cmd "\$@"
|
||||
}
|
||||
export -f fzf > /dev/null
|
||||
fi
|
||||
|
||||
# Auto-completion
|
||||
# ---------------
|
||||
|
src/Dockerfile (new file, 33 lines)
@@ -0,0 +1,33 @@
FROM ubuntu:14.04
MAINTAINER Junegunn Choi <junegunn.c@gmail.com>

# apt-get
RUN apt-get update && apt-get -y upgrade
RUN apt-get install -y --force-yes git vim-nox curl procps sudo \
    build-essential libncurses-dev

# Setup jg user with sudo privilege
RUN useradd -s /bin/bash -m jg && echo 'jg:jg' | chpasswd && \
    echo 'jg ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/jg

# Setup dotfiles
USER jg
RUN cd ~ && git clone https://github.com/junegunn/dotfiles.git && \
    dotfiles/install > /dev/null

# Install Go 1.4
RUN cd ~ && curl https://storage.googleapis.com/golang/go1.4.linux-amd64.tar.gz | tar -xz && \
    mv go go1.4 && \
    echo 'export GOROOT=~/go1.4' >> ~/dotfiles/bashrc-extra && \
    echo 'export PATH=~/go1.4/bin:$PATH' >> ~/dotfiles/bashrc-extra

# Symlink fzf directory
RUN mkdir -p ~jg/go/src/github.com/junegunn && \
    ln -s /fzf ~jg/go/src/github.com/junegunn/fzf

# Volume
VOLUME /fzf

# Default CMD
CMD cd ~jg/go/src/github.com/junegunn/fzf/src && /bin/bash -l
src/LICENSE (new file, 21 lines)
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Junegunn Choi

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
src/Makefile (new file, 49 lines)
@@ -0,0 +1,49 @@
BINARY := fzf/fzf

UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Darwin)
BINARY := $(BINARY)_darwin
else ifeq ($(UNAME_S),Linux)
BINARY := $(BINARY)_linux
endif

UNAME_M := $(shell uname -m)
ifneq ($(filter i386 i686,$(UNAME_M)),)
$(error "filtered is not supported, yet.")
endif

ifeq ($(UNAME_M),x86_64)
BINARY := $(BINARY)_amd64
else ifneq ($(filter i386 i686,$(UNAME_M)),)
BINARY := $(BINARY)_386
else # TODO
$(error "$(UNAME_M) is not supported, yet.")
endif

BINDIR = ../bin
SOURCES = $(wildcard *.go fzf/*.go)

all: build

build: $(BINARY)

$(BINARY): $(SOURCES)
	go get
	go test -v
	cd fzf && go build -o $(notdir $(BINARY))

install: $(BINARY)
	mkdir -p $(BINDIR)
	cp -f $(BINARY) $(BINDIR)/fzf

clean:
	rm -f $(BINARY)

docker:
	docker build -t junegunn/ubuntu-sandbox .

linux64:
	docker run -i -t -u jg -v $(shell cd ..; pwd):/fzf junegunn/ubuntu-sandbox \
	/bin/bash -ci 'cd ~jg/go/src/github.com/junegunn/fzf/src; make build'

.PHONY: build install linux64 clean docker run
src/README.md (new file, 59 lines)
@@ -0,0 +1,59 @@
fzf in Go
=========

This directory contains the source code for the new fzf implementation in Go.
This new version has the following benefits over the previous Ruby version.

- Immensely faster
    - No GIL. Performance is linearly proportional to the number of cores.
    - It's so fast that I even decided to remove the sort limit (`--sort=N`)
- Does not require Ruby and distributed as an executable binary
    - Ruby dependency is especially painful on Ruby 2.1 or above which
      ships without curses gem

Build
-----

```sh
# Build fzf executable
make

# Install the executable to ../bin directory
make install

# Build executable for Linux x86_64 using Docker
make linux64
```


Prebuilt binaries
-----------------

- Darwin x86_64
- Linux x86_64

Third-party libraries used
--------------------------

- [ncurses](https://www.gnu.org/software/ncurses/)
- [mattn/go-runewidth](https://github.com/mattn/go-runewidth)
    - Licensed under [MIT](http://mattn.mit-license.org/2013)
- [mattn/go-shellwords](https://github.com/mattn/go-shellwords)
    - Licensed under [MIT](http://mattn.mit-license.org/2014)

Contribution
------------

For the moment, I will not add or accept any new features until we can be sure
that the implementation is stable and we have a sufficient number of test
cases. However, fixes for obvious bugs and new test cases are welcome.

I also care much about the performance of the implementation (that's the
reason I rewrote the whole thing in Go, right?), so please make sure that your
change does not result in performance regression. Please be minded that we
still don't have a quantitative measure of the performance.

License
-------

- [MIT](LICENSE)
src/algo.go (new file, 152 lines)
@@ -0,0 +1,152 @@
package fzf

import "strings"

/*
 * String matching algorithms here do not use strings.ToLower to avoid
 * performance penalty. And they assume pattern runes are given in lowercase
 * letters when caseSensitive is false.
 *
 * In short: They try to do as little work as possible.
 */

func FuzzyMatch(caseSensitive bool, input *string, pattern []rune) (int, int) {
	runes := []rune(*input)

	// 0. (FIXME) How to find the shortest match?
	//    a_____b__c__abc
	//    ^^^^^^^^^^  ^^^
	// 1. forward scan (abc)
	//    *-----*-----*>
	//    a_____b___abc__
	// 2. reverse scan (cba)
	//    a_____b___abc__
	//            <***
	pidx := 0
	sidx := -1
	eidx := -1

	for index, char := range runes {
		// This is considerably faster than blindly applying strings.ToLower to the
		// whole string
		if !caseSensitive && char >= 65 && char <= 90 {
			char += 32
		}
		if char == pattern[pidx] {
			if sidx < 0 {
				sidx = index
			}
			if pidx += 1; pidx == len(pattern) {
				eidx = index + 1
				break
			}
		}
	}

	if sidx >= 0 && eidx >= 0 {
		pidx -= 1
		for index := eidx - 1; index >= sidx; index-- {
			char := runes[index]
			if !caseSensitive && char >= 65 && char <= 90 {
				char += 32
			}
			if char == pattern[pidx] {
				if pidx -= 1; pidx < 0 {
					sidx = index
					break
				}
			}
		}
		return sidx, eidx
	}
	return -1, -1
}

func ExactMatchStrings(caseSensitive bool, input *string, pattern []rune) (int, int) {
	var str string
	if caseSensitive {
		str = *input
	} else {
		str = strings.ToLower(*input)
	}

	if idx := strings.Index(str, string(pattern)); idx >= 0 {
		prefixRuneLen := len([]rune((*input)[:idx]))
		return prefixRuneLen, prefixRuneLen + len(pattern)
	}
	return -1, -1
}

/*
 * This is a basic string searching algorithm that handles case sensitivity.
 * Although naive, it still performs better than the combination of
 * strings.ToLower + strings.Index for typical fzf use cases where input
 * strings and patterns are not very long.
 *
 * We might try to implement better algorithms in the future:
 * http://en.wikipedia.org/wiki/String_searching_algorithm
 */
func ExactMatchNaive(caseSensitive bool, input *string, pattern []rune) (int, int) {
	runes := []rune(*input)
	numRunes := len(runes)
	plen := len(pattern)
	if len(runes) < plen {
		return -1, -1
	}

	pidx := 0
	for index := 0; index < numRunes; index++ {
		char := runes[index]
		if !caseSensitive && char >= 65 && char <= 90 {
			char += 32
		}
		if pattern[pidx] == char {
			pidx += 1
			if pidx == plen {
				return index - plen + 1, index + 1
			}
		} else {
			index -= pidx
			pidx = 0
		}
	}
	return -1, -1
}

func PrefixMatch(caseSensitive bool, input *string, pattern []rune) (int, int) {
	runes := []rune(*input)
	if len(runes) < len(pattern) {
		return -1, -1
	}

	for index, r := range pattern {
		char := runes[index]
		if !caseSensitive && char >= 65 && char <= 90 {
			char += 32
		}
		if char != r {
			return -1, -1
		}
	}
	return 0, len(pattern)
}

func SuffixMatch(caseSensitive bool, input *string, pattern []rune) (int, int) {
	runes := []rune(strings.TrimRight(*input, " "))
	trimmedLen := len(runes)
	diff := trimmedLen - len(pattern)
	if diff < 0 {
		return -1, -1
	}

	for index, r := range pattern {
		char := runes[index+diff]
		if !caseSensitive && char >= 65 && char <= 90 {
			char += 32
		}
		if char != r {
			return -1, -1
		}
	}
	return trimmedLen - len(pattern), trimmedLen
}
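To make the two-pass scan in FuzzyMatch above concrete, here is a small illustrative test (not part of this commit; the test name is made up) that exercises the example string from the comment. The forward pass alone would anchor the match at the first `a`; the reverse pass tightens the start to the trailing `abc`.

```go
package fzf

import "testing"

// Hypothetical scratch test, assuming it sits in the fzf package next to algo.go.
func TestFuzzyMatchTwoPassSketch(t *testing.T) {
	input := "a_____b___abc__"
	// The forward scan reaches a(0) ... c(12); the reverse scan then moves the
	// start index to the 'a' at position 10, so the reported range is [10, 13).
	s, e := FuzzyMatch(true, &input, []rune("abc"))
	if s != 10 || e != 13 {
		t.Errorf("expected (10, 13), got (%d, %d)", s, e)
	}
}
```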
src/algo_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func assertMatch(t *testing.T, fun func(bool, *string, []rune) (int, int), caseSensitive bool, input string, pattern string, sidx int, eidx int) {
|
||||
if !caseSensitive {
|
||||
pattern = strings.ToLower(pattern)
|
||||
}
|
||||
s, e := fun(caseSensitive, &input, []rune(pattern))
|
||||
if s != sidx {
|
||||
t.Errorf("Invalid start index: %d (expected: %d, %s / %s)", s, sidx, input, pattern)
|
||||
}
|
||||
if e != eidx {
|
||||
t.Errorf("Invalid end index: %d (expected: %d, %s / %s)", e, eidx, input, pattern)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzyMatch(t *testing.T) {
|
||||
assertMatch(t, FuzzyMatch, false, "fooBarbaz", "oBZ", 2, 9)
|
||||
assertMatch(t, FuzzyMatch, true, "fooBarbaz", "oBZ", -1, -1)
|
||||
assertMatch(t, FuzzyMatch, true, "fooBarbaz", "oBz", 2, 9)
|
||||
assertMatch(t, FuzzyMatch, true, "fooBarbaz", "fooBarbazz", -1, -1)
|
||||
}
|
||||
|
||||
func TestExactMatchNaive(t *testing.T) {
|
||||
assertMatch(t, ExactMatchNaive, false, "fooBarbaz", "oBA", 2, 5)
|
||||
assertMatch(t, ExactMatchNaive, true, "fooBarbaz", "oBA", -1, -1)
|
||||
assertMatch(t, ExactMatchNaive, true, "fooBarbaz", "fooBarbazz", -1, -1)
|
||||
}
|
||||
|
||||
func TestPrefixMatch(t *testing.T) {
|
||||
assertMatch(t, PrefixMatch, false, "fooBarbaz", "Foo", 0, 3)
|
||||
assertMatch(t, PrefixMatch, true, "fooBarbaz", "Foo", -1, -1)
|
||||
assertMatch(t, PrefixMatch, false, "fooBarbaz", "baz", -1, -1)
|
||||
}
|
||||
|
||||
func TestSuffixMatch(t *testing.T) {
|
||||
assertMatch(t, SuffixMatch, false, "fooBarbaz", "Foo", -1, -1)
|
||||
assertMatch(t, SuffixMatch, false, "fooBarbaz", "baz", 6, 9)
|
||||
assertMatch(t, SuffixMatch, true, "fooBarbaz", "Baz", -1, -1)
|
||||
}
|
src/atomicbool.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package fzf

import "sync"

type AtomicBool struct {
	mutex sync.Mutex
	state bool
}

func NewAtomicBool(initialState bool) *AtomicBool {
	return &AtomicBool{
		mutex: sync.Mutex{},
		state: initialState}
}

func (a *AtomicBool) Get() bool {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	return a.state
}

func (a *AtomicBool) Set(newState bool) bool {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	a.state = newState
	return a.state
}
src/atomicbool_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
package fzf
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestAtomicBool(t *testing.T) {
|
||||
if !NewAtomicBool(true).Get() || NewAtomicBool(false).Get() {
|
||||
t.Error("Invalid initial value")
|
||||
}
|
||||
|
||||
ab := NewAtomicBool(true)
|
||||
if ab.Set(false) {
|
||||
t.Error("Invalid return value")
|
||||
}
|
||||
if ab.Get() {
|
||||
t.Error("Invalid state")
|
||||
}
|
||||
}
|
src/cache.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package fzf

import "sync"

type QueryCache map[string][]*Item
type ChunkCache struct {
	mutex sync.Mutex
	cache map[*Chunk]*QueryCache
}

func NewChunkCache() ChunkCache {
	return ChunkCache{sync.Mutex{}, make(map[*Chunk]*QueryCache)}
}

func (cc *ChunkCache) Add(chunk *Chunk, key string, list []*Item) {
	if len(key) == 0 || !chunk.IsFull() {
		return
	}

	cc.mutex.Lock()
	defer cc.mutex.Unlock()

	qc, ok := cc.cache[chunk]
	if !ok {
		cc.cache[chunk] = &QueryCache{}
		qc = cc.cache[chunk]
	}
	(*qc)[key] = list
}

func (cc *ChunkCache) Find(chunk *Chunk, key string) ([]*Item, bool) {
	if len(key) == 0 || !chunk.IsFull() {
		return nil, false
	}

	cc.mutex.Lock()
	defer cc.mutex.Unlock()

	qc, ok := cc.cache[chunk]
	if ok {
		list, ok := (*qc)[key]
		if ok {
			return list, true
		}
	}
	return nil, false
}
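As a quick illustration of the caching contract in cache.go above, the sketch below (not part of this commit; `demoChunkCache` is a made-up name) shows that only full chunks with a non-empty query are ever cached, so partial chunks are always re-scanned by the matcher.

```go
package fzf

// demoChunkCache is an illustrative sketch, not part of the commit.
func demoChunkCache() bool {
	cache := NewChunkCache()
	chunk := &Chunk{} // in practice a chunk holding CHUNK_SIZE items
	matches := []*Item{}

	cache.Add(chunk, "abc", matches) // no-op: the chunk is not full

	_, found := cache.Find(chunk, "abc")
	return found // false for the same reason: partial chunks are never cached
}
```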
src/chunklist.go (new file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
package fzf
|
||||
|
||||
import "sync"
|
||||
|
||||
const CHUNK_SIZE int = 100
|
||||
|
||||
type Chunk []*Item // >>> []Item
|
||||
|
||||
type Transformer func(*string, int) *Item
|
||||
|
||||
type ChunkList struct {
|
||||
chunks []*Chunk
|
||||
count int
|
||||
mutex sync.Mutex
|
||||
trans Transformer
|
||||
}
|
||||
|
||||
func NewChunkList(trans Transformer) *ChunkList {
|
||||
return &ChunkList{
|
||||
chunks: []*Chunk{},
|
||||
count: 0,
|
||||
mutex: sync.Mutex{},
|
||||
trans: trans}
|
||||
}
|
||||
|
||||
func (c *Chunk) push(trans Transformer, data *string, index int) {
|
||||
*c = append(*c, trans(data, index))
|
||||
}
|
||||
|
||||
func (c *Chunk) IsFull() bool {
|
||||
return len(*c) == CHUNK_SIZE
|
||||
}
|
||||
|
||||
func (cl *ChunkList) lastChunk() *Chunk {
|
||||
return cl.chunks[len(cl.chunks)-1]
|
||||
}
|
||||
|
||||
func CountItems(cs []*Chunk) int {
|
||||
if len(cs) == 0 {
|
||||
return 0
|
||||
}
|
||||
return CHUNK_SIZE*(len(cs)-1) + len(*(cs[len(cs)-1]))
|
||||
}
|
||||
|
||||
func (cl *ChunkList) Count() int {
|
||||
return cl.count
|
||||
}
|
||||
|
||||
func (cl *ChunkList) Chunks() []*Chunk {
|
||||
return cl.chunks
|
||||
}
|
||||
|
||||
func (cl *ChunkList) Push(data string) {
|
||||
cl.mutex.Lock()
|
||||
defer cl.mutex.Unlock()
|
||||
|
||||
if len(cl.chunks) == 0 || cl.lastChunk().IsFull() {
|
||||
newChunk := Chunk(make([]*Item, 0, CHUNK_SIZE))
|
||||
cl.chunks = append(cl.chunks, &newChunk)
|
||||
}
|
||||
|
||||
cl.lastChunk().push(cl.trans, &data, cl.count)
|
||||
cl.count += 1
|
||||
}
|
||||
|
||||
func (cl *ChunkList) Snapshot() []*Chunk {
|
||||
cl.mutex.Lock()
|
||||
defer cl.mutex.Unlock()
|
||||
|
||||
ret := make([]*Chunk, len(cl.chunks))
|
||||
copy(ret, cl.chunks)
|
||||
return ret
|
||||
}
|
src/chunklist_test.go (new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestChunkList(t *testing.T) {
|
||||
cl := NewChunkList(func(s *string, i int) *Item {
|
||||
return &Item{text: s, index: i * 2}
|
||||
})
|
||||
|
||||
// Snapshot
|
||||
snapshot := cl.Snapshot()
|
||||
if len(snapshot) > 0 {
|
||||
t.Error("Snapshot should be empty now")
|
||||
}
|
||||
|
||||
// Add some data
|
||||
cl.Push("hello")
|
||||
cl.Push("world")
|
||||
|
||||
// Previously created snapshot should remain the same
|
||||
if len(snapshot) > 0 {
|
||||
t.Error("Snapshot should not have changed")
|
||||
}
|
||||
|
||||
// But the new snapshot should contain the added items
|
||||
snapshot = cl.Snapshot()
|
||||
if len(snapshot) != 1 {
|
||||
t.Error("Snapshot should not be empty now")
|
||||
}
|
||||
|
||||
// Check the content of the ChunkList
|
||||
chunk1 := snapshot[0]
|
||||
if len(*chunk1) != 2 {
|
||||
t.Error("Snapshot should contain only two items")
|
||||
}
|
||||
if *(*chunk1)[0].text != "hello" || (*chunk1)[0].index != 0 ||
|
||||
*(*chunk1)[1].text != "world" || (*chunk1)[1].index != 2 {
|
||||
t.Error("Invalid data")
|
||||
}
|
||||
if chunk1.IsFull() {
|
||||
t.Error("Chunk should not have been marked full yet")
|
||||
}
|
||||
|
||||
// Add more data
|
||||
for i := 0; i < CHUNK_SIZE*2; i++ {
|
||||
cl.Push(fmt.Sprintf("item %d", i))
|
||||
}
|
||||
|
||||
// Previous snapshot should remain the same
|
||||
if len(snapshot) != 1 {
|
||||
t.Error("Snapshot should stay the same")
|
||||
}
|
||||
|
||||
// New snapshot
|
||||
snapshot = cl.Snapshot()
|
||||
if len(snapshot) != 3 || !snapshot[0].IsFull() ||
|
||||
!snapshot[1].IsFull() || snapshot[2].IsFull() {
|
||||
t.Error("Expected two full chunks and one more chunk")
|
||||
}
|
||||
if len(*snapshot[2]) != 2 {
|
||||
t.Error("Unexpected number of items")
|
||||
}
|
||||
}
|
src/constants.go (new file, 12 lines)
@@ -0,0 +1,12 @@
package fzf

const VERSION = "0.9.0"

const (
	EVT_READ_NEW EventType = iota
	EVT_READ_FIN
	EVT_SEARCH_NEW
	EVT_SEARCH_PROGRESS
	EVT_SEARCH_FIN
	EVT_CLOSE
)
src/core.go (new file, 153 lines)
@@ -0,0 +1,153 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
const COORDINATOR_DELAY time.Duration = 100 * time.Millisecond
|
||||
|
||||
func initProcs() {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
}
|
||||
|
||||
/*
|
||||
Reader -> EVT_READ_FIN
|
||||
Reader -> EVT_READ_NEW -> Matcher (restart)
|
||||
Terminal -> EVT_SEARCH_NEW -> Matcher (restart)
|
||||
Matcher -> EVT_SEARCH_PROGRESS -> Terminal (update info)
|
||||
Matcher -> EVT_SEARCH_FIN -> Terminal (update list)
|
||||
*/
|
||||
|
||||
func Run(options *Options) {
|
||||
initProcs()
|
||||
|
||||
opts := ParseOptions()
|
||||
|
||||
if opts.Version {
|
||||
fmt.Println(VERSION)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// Event channel
|
||||
eventBox := NewEventBox()
|
||||
|
||||
// Chunk list
|
||||
var chunkList *ChunkList
|
||||
if len(opts.WithNth) == 0 {
|
||||
chunkList = NewChunkList(func(data *string, index int) *Item {
|
||||
return &Item{text: data, index: index}
|
||||
})
|
||||
} else {
|
||||
chunkList = NewChunkList(func(data *string, index int) *Item {
|
||||
item := Item{text: data, index: index}
|
||||
tokens := Tokenize(item.text, opts.Delimiter)
|
||||
item.origText = item.text
|
||||
item.text = Transform(tokens, opts.WithNth).whole
|
||||
return &item
|
||||
})
|
||||
}
|
||||
|
||||
// Reader
|
||||
reader := Reader{func(str string) { chunkList.Push(str) }, eventBox}
|
||||
go reader.ReadSource()
|
||||
|
||||
// Matcher
|
||||
patternBuilder := func(runes []rune) *Pattern {
|
||||
return BuildPattern(
|
||||
opts.Mode, opts.Case, opts.Nth, opts.Delimiter, runes)
|
||||
}
|
||||
matcher := NewMatcher(patternBuilder, opts.Sort > 0, eventBox)
|
||||
|
||||
// Defered-interactive / Non-interactive
|
||||
// --select-1 | --exit-0 | --filter
|
||||
if filtering := opts.Filter != nil; filtering || opts.Select1 || opts.Exit0 {
|
||||
limit := 0
|
||||
var patternString string
|
||||
if filtering {
|
||||
patternString = *opts.Filter
|
||||
} else {
|
||||
if opts.Select1 || opts.Exit0 {
|
||||
limit = 1
|
||||
}
|
||||
patternString = opts.Query
|
||||
}
|
||||
pattern := patternBuilder([]rune(patternString))
|
||||
|
||||
looping := true
|
||||
for looping {
|
||||
eventBox.Wait(func(events *Events) {
|
||||
for evt, _ := range *events {
|
||||
switch evt {
|
||||
case EVT_READ_FIN:
|
||||
looping = false
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
time.Sleep(COORDINATOR_DELAY)
|
||||
}
|
||||
|
||||
matches, cancelled := matcher.scan(MatchRequest{
|
||||
chunks: chunkList.Snapshot(),
|
||||
pattern: pattern}, limit)
|
||||
|
||||
if !cancelled && (filtering || opts.Exit0) {
|
||||
if opts.PrintQuery {
|
||||
fmt.Println(patternString)
|
||||
}
|
||||
for _, item := range matches {
|
||||
item.Print()
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
|
||||
// Go interactive
|
||||
go matcher.Loop()
|
||||
|
||||
// Terminal I/O
|
||||
terminal := NewTerminal(opts, eventBox)
|
||||
go terminal.Loop()
|
||||
|
||||
// Event coordination
|
||||
reading := true
|
||||
ticks := 0
|
||||
for {
|
||||
delay := true
|
||||
ticks += 1
|
||||
eventBox.Wait(func(events *Events) {
|
||||
defer events.Clear()
|
||||
for evt, value := range *events {
|
||||
switch evt {
|
||||
|
||||
case EVT_READ_NEW, EVT_READ_FIN:
|
||||
reading = reading && evt == EVT_READ_NEW
|
||||
terminal.UpdateCount(chunkList.Count(), !reading)
|
||||
matcher.Reset(chunkList.Snapshot(), terminal.Input(), false)
|
||||
|
||||
case EVT_SEARCH_NEW:
|
||||
matcher.Reset(chunkList.Snapshot(), terminal.Input(), true)
|
||||
delay = false
|
||||
|
||||
case EVT_SEARCH_PROGRESS:
|
||||
switch val := value.(type) {
|
||||
case float32:
|
||||
terminal.UpdateProgress(val)
|
||||
}
|
||||
|
||||
case EVT_SEARCH_FIN:
|
||||
switch val := value.(type) {
|
||||
case []*Item:
|
||||
terminal.UpdateList(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
if ticks > 3 && delay && reading {
|
||||
time.Sleep(COORDINATOR_DELAY)
|
||||
}
|
||||
}
|
||||
}
|
src/curses/curses.go (new file, 424 lines)
@@ -0,0 +1,424 @@
|
||||
package curses
|
||||
|
||||
// #include <ncurses.h>
|
||||
// #include <locale.h>
|
||||
// #cgo LDFLAGS: -lncurses
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
RUNE = iota
|
||||
|
||||
CTRL_A
|
||||
CTRL_B
|
||||
CTRL_C
|
||||
CTRL_D
|
||||
CTRL_E
|
||||
CTRL_F
|
||||
CTRL_G
|
||||
CTRL_H
|
||||
TAB
|
||||
CTRL_J
|
||||
CTRL_K
|
||||
CTRL_L
|
||||
CTRL_M
|
||||
CTRL_N
|
||||
CTRL_O
|
||||
CTRL_P
|
||||
CTRL_Q
|
||||
CTRL_R
|
||||
CTRL_S
|
||||
CTRL_T
|
||||
CTRL_U
|
||||
CTRL_V
|
||||
CTRL_W
|
||||
CTRL_X
|
||||
CTRL_Y
|
||||
CTRL_Z
|
||||
ESC
|
||||
|
||||
INVALID
|
||||
MOUSE
|
||||
|
||||
BTAB
|
||||
|
||||
DEL
|
||||
PGUP
|
||||
PGDN
|
||||
|
||||
ALT_B
|
||||
ALT_F
|
||||
ALT_D
|
||||
ALT_BS
|
||||
)
|
||||
|
||||
const (
|
||||
COL_NORMAL = iota
|
||||
COL_PROMPT
|
||||
COL_MATCH
|
||||
COL_CURRENT
|
||||
COL_CURRENT_MATCH
|
||||
COL_SPINNER
|
||||
COL_INFO
|
||||
COL_CURSOR
|
||||
COL_SELECTED
|
||||
)
|
||||
|
||||
const (
|
||||
DOUBLE_CLICK_DURATION = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
type Event struct {
|
||||
Type int
|
||||
Char rune
|
||||
MouseEvent *MouseEvent
|
||||
}
|
||||
|
||||
type MouseEvent struct {
|
||||
Y int
|
||||
X int
|
||||
S int
|
||||
Down bool
|
||||
Double bool
|
||||
Mod bool
|
||||
}
|
||||
|
||||
var (
|
||||
_buf []byte
|
||||
_in *os.File
|
||||
_color func(int, bool) C.int
|
||||
_prevDownTime time.Time
|
||||
_prevDownY int
|
||||
_clickY []int
|
||||
)
|
||||
|
||||
func init() {
|
||||
_prevDownTime = time.Unix(0, 0)
|
||||
_clickY = []int{}
|
||||
}
|
||||
|
||||
func attrColored(pair int, bold bool) C.int {
|
||||
var attr C.int = 0
|
||||
if pair > COL_NORMAL {
|
||||
attr = C.COLOR_PAIR(C.int(pair))
|
||||
}
|
||||
if bold {
|
||||
attr = attr | C.A_BOLD
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
func attrMono(pair int, bold bool) C.int {
|
||||
var attr C.int = 0
|
||||
switch pair {
|
||||
case COL_CURRENT:
|
||||
if bold {
|
||||
attr = C.A_REVERSE
|
||||
}
|
||||
case COL_MATCH:
|
||||
attr = C.A_UNDERLINE
|
||||
case COL_CURRENT_MATCH:
|
||||
attr = C.A_UNDERLINE | C.A_REVERSE
|
||||
}
|
||||
if bold {
|
||||
attr = attr | C.A_BOLD
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
func MaxX() int {
|
||||
return int(C.COLS)
|
||||
}
|
||||
|
||||
func MaxY() int {
|
||||
return int(C.LINES)
|
||||
}
|
||||
|
||||
func getch(nonblock bool) int {
|
||||
b := make([]byte, 1)
|
||||
syscall.SetNonblock(int(_in.Fd()), nonblock)
|
||||
_, err := _in.Read(b)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
return int(b[0])
|
||||
}
|
||||
|
||||
func Init(color bool, color256 bool, black bool, mouse bool) {
|
||||
{
|
||||
in, err := os.OpenFile("/dev/tty", syscall.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
panic("Failed to open /dev/tty")
|
||||
}
|
||||
_in = in
|
||||
// Break STDIN
|
||||
// syscall.Dup2(int(in.Fd()), int(os.Stdin.Fd()))
|
||||
}
|
||||
|
||||
swapOutput()
|
||||
|
||||
C.setlocale(C.LC_ALL, C.CString(""))
|
||||
C.initscr()
|
||||
if mouse {
|
||||
C.mousemask(C.ALL_MOUSE_EVENTS, nil)
|
||||
}
|
||||
C.cbreak()
|
||||
C.noecho()
|
||||
C.raw() // stty dsusp undef
|
||||
C.set_tabsize(4) // FIXME
|
||||
|
||||
intChan := make(chan os.Signal, 1)
|
||||
signal.Notify(intChan, os.Interrupt, os.Kill)
|
||||
go func() {
|
||||
<-intChan
|
||||
Close()
|
||||
os.Exit(1)
|
||||
}()
|
||||
|
||||
if color {
|
||||
C.start_color()
|
||||
var bg C.short
|
||||
if black {
|
||||
bg = C.COLOR_BLACK
|
||||
} else {
|
||||
C.use_default_colors()
|
||||
bg = -1
|
||||
}
|
||||
if color256 {
|
||||
C.init_pair(COL_PROMPT, 110, bg)
|
||||
C.init_pair(COL_MATCH, 108, bg)
|
||||
C.init_pair(COL_CURRENT, 254, 236)
|
||||
C.init_pair(COL_CURRENT_MATCH, 151, 236)
|
||||
C.init_pair(COL_SPINNER, 148, bg)
|
||||
C.init_pair(COL_INFO, 144, bg)
|
||||
C.init_pair(COL_CURSOR, 161, 236)
|
||||
C.init_pair(COL_SELECTED, 168, 236)
|
||||
} else {
|
||||
C.init_pair(COL_PROMPT, C.COLOR_BLUE, bg)
|
||||
C.init_pair(COL_MATCH, C.COLOR_GREEN, bg)
|
||||
C.init_pair(COL_CURRENT, C.COLOR_YELLOW, C.COLOR_BLACK)
|
||||
C.init_pair(COL_CURRENT_MATCH, C.COLOR_GREEN, C.COLOR_BLACK)
|
||||
C.init_pair(COL_SPINNER, C.COLOR_GREEN, bg)
|
||||
C.init_pair(COL_INFO, C.COLOR_WHITE, bg)
|
||||
C.init_pair(COL_CURSOR, C.COLOR_RED, C.COLOR_BLACK)
|
||||
C.init_pair(COL_SELECTED, C.COLOR_MAGENTA, C.COLOR_BLACK)
|
||||
}
|
||||
_color = attrColored
|
||||
} else {
|
||||
_color = attrMono
|
||||
}
|
||||
}
|
||||
|
||||
func Close() {
|
||||
C.endwin()
|
||||
swapOutput()
|
||||
}
|
||||
|
||||
func swapOutput() {
|
||||
syscall.Dup2(2, 3)
|
||||
syscall.Dup2(1, 2)
|
||||
syscall.Dup2(3, 1)
|
||||
}
|
||||
|
||||
func GetBytes() []byte {
|
||||
c := getch(false)
|
||||
_buf = append(_buf, byte(c))
|
||||
|
||||
for {
|
||||
c = getch(true)
|
||||
if c == -1 {
|
||||
break
|
||||
}
|
||||
_buf = append(_buf, byte(c))
|
||||
}
|
||||
|
||||
return _buf
|
||||
}
|
||||
|
||||
// 27 (91 79) 77 type x y
|
||||
func mouseSequence(sz *int) Event {
|
||||
if len(_buf) < 6 {
|
||||
return Event{INVALID, 0, nil}
|
||||
}
|
||||
*sz = 6
|
||||
switch _buf[3] {
|
||||
case 32, 36, 40, 48, // mouse-down / shift / cmd / ctrl
|
||||
35, 39, 43, 51: // mouse-up / shift / cmd / ctrl
|
||||
mod := _buf[3] >= 36
|
||||
down := _buf[3]%2 == 0
|
||||
x := int(_buf[4] - 33)
|
||||
y := int(_buf[5] - 33)
|
||||
double := false
|
||||
if down {
|
||||
now := time.Now()
|
||||
if now.Sub(_prevDownTime) < DOUBLE_CLICK_DURATION {
|
||||
_clickY = append(_clickY, y)
|
||||
} else {
|
||||
_clickY = []int{y}
|
||||
}
|
||||
_prevDownTime = now
|
||||
} else {
|
||||
if len(_clickY) > 1 && _clickY[0] == _clickY[1] &&
|
||||
time.Now().Sub(_prevDownTime) < DOUBLE_CLICK_DURATION {
|
||||
double = true
|
||||
}
|
||||
}
|
||||
return Event{MOUSE, 0, &MouseEvent{y, x, 0, down, double, mod}}
|
||||
case 96, 100, 104, 112, // scroll-up / shift / cmd / ctrl
|
||||
97, 101, 105, 113: // scroll-down / shift / cmd / ctrl
|
||||
mod := _buf[3] >= 100
|
||||
s := 1 - int(_buf[3]%2)*2
|
||||
return Event{MOUSE, 0, &MouseEvent{0, 0, s, false, false, mod}}
|
||||
}
|
||||
return Event{INVALID, 0, nil}
|
||||
}
|
||||
|
||||
func escSequence(sz *int) Event {
|
||||
if len(_buf) < 2 {
|
||||
return Event{ESC, 0, nil}
|
||||
}
|
||||
*sz = 2
|
||||
switch _buf[1] {
|
||||
case 98:
|
||||
return Event{ALT_B, 0, nil}
|
||||
case 100:
|
||||
return Event{ALT_D, 0, nil}
|
||||
case 102:
|
||||
return Event{ALT_F, 0, nil}
|
||||
case 127:
|
||||
return Event{ALT_BS, 0, nil}
|
||||
case 91, 79:
|
||||
if len(_buf) < 3 {
|
||||
return Event{INVALID, 0, nil}
|
||||
}
|
||||
*sz = 3
|
||||
switch _buf[2] {
|
||||
case 68:
|
||||
return Event{CTRL_B, 0, nil}
|
||||
case 67:
|
||||
return Event{CTRL_F, 0, nil}
|
||||
case 66:
|
||||
return Event{CTRL_J, 0, nil}
|
||||
case 65:
|
||||
return Event{CTRL_K, 0, nil}
|
||||
case 90:
|
||||
return Event{BTAB, 0, nil}
|
||||
case 72:
|
||||
return Event{CTRL_A, 0, nil}
|
||||
case 70:
|
||||
return Event{CTRL_E, 0, nil}
|
||||
case 77:
|
||||
return mouseSequence(sz)
|
||||
case 49, 50, 51, 52, 53, 54:
|
||||
if len(_buf) < 4 {
|
||||
return Event{INVALID, 0, nil}
|
||||
}
|
||||
*sz = 4
|
||||
switch _buf[2] {
|
||||
case 50:
|
||||
return Event{INVALID, 0, nil} // INS
|
||||
case 51:
|
||||
return Event{DEL, 0, nil}
|
||||
case 52:
|
||||
return Event{CTRL_E, 0, nil}
|
||||
case 53:
|
||||
return Event{PGUP, 0, nil}
|
||||
case 54:
|
||||
return Event{PGDN, 0, nil}
|
||||
case 49:
|
||||
switch _buf[3] {
|
||||
case 126:
|
||||
return Event{CTRL_A, 0, nil}
|
||||
case 59:
|
||||
if len(_buf) != 6 {
|
||||
return Event{INVALID, 0, nil}
|
||||
}
|
||||
*sz = 6
|
||||
switch _buf[4] {
|
||||
case 50:
|
||||
switch _buf[5] {
|
||||
case 68:
|
||||
return Event{CTRL_A, 0, nil}
|
||||
case 67:
|
||||
return Event{CTRL_E, 0, nil}
|
||||
}
|
||||
case 53:
|
||||
switch _buf[5] {
|
||||
case 68:
|
||||
return Event{ALT_B, 0, nil}
|
||||
case 67:
|
||||
return Event{ALT_F, 0, nil}
|
||||
}
|
||||
} // _buf[4]
|
||||
} // _buf[3]
|
||||
} // _buf[2]
|
||||
} // _buf[2]
|
||||
} // _buf[1]
|
||||
return Event{INVALID, 0, nil}
|
||||
}
|
||||
|
||||
func GetChar() Event {
|
||||
if len(_buf) == 0 {
|
||||
_buf = GetBytes()
|
||||
}
|
||||
if len(_buf) == 0 {
|
||||
panic("Empty _buffer")
|
||||
}
|
||||
|
||||
sz := 1
|
||||
defer func() {
|
||||
_buf = _buf[sz:]
|
||||
}()
|
||||
|
||||
switch _buf[0] {
|
||||
case CTRL_C, CTRL_G, CTRL_Q:
|
||||
return Event{CTRL_C, 0, nil}
|
||||
case 127:
|
||||
return Event{CTRL_H, 0, nil}
|
||||
case ESC:
|
||||
return escSequence(&sz)
|
||||
}
|
||||
|
||||
// CTRL-A ~ CTRL-Z
|
||||
if _buf[0] <= CTRL_Z {
|
||||
return Event{int(_buf[0]), 0, nil}
|
||||
}
|
||||
r, rsz := utf8.DecodeRune(_buf)
|
||||
sz = rsz
|
||||
return Event{RUNE, r, nil}
|
||||
}
|
||||
|
||||
func Move(y int, x int) {
|
||||
C.move(C.int(y), C.int(x))
|
||||
}
|
||||
|
||||
func MoveAndClear(y int, x int) {
|
||||
Move(y, x)
|
||||
C.clrtoeol()
|
||||
}
|
||||
|
||||
func Print(text string) {
|
||||
C.addstr(C.CString(text))
|
||||
}
|
||||
|
||||
func CPrint(pair int, bold bool, text string) {
|
||||
attr := _color(pair, bold)
|
||||
C.attron(attr)
|
||||
Print(text)
|
||||
C.attroff(attr)
|
||||
}
|
||||
|
||||
func Clear() {
|
||||
C.clear()
|
||||
}
|
||||
|
||||
func Refresh() {
|
||||
C.refresh()
|
||||
}
|
src/eventbox.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package fzf

import "sync"

type EventType int

type Events map[EventType]interface{}

type EventBox struct {
	events Events
	cond   *sync.Cond
}

func NewEventBox() *EventBox {
	return &EventBox{make(Events), sync.NewCond(&sync.Mutex{})}
}

func (b *EventBox) Wait(callback func(*Events)) {
	b.cond.L.Lock()
	defer b.cond.L.Unlock()

	if len(b.events) == 0 {
		b.cond.Wait()
	}

	callback(&b.events)
}

func (b *EventBox) Set(event EventType, value interface{}) {
	b.cond.L.Lock()
	defer b.cond.L.Unlock()
	b.events[event] = value
	b.cond.Broadcast()
}

// Unsynchronized; should be called within Wait routine
func (events *Events) Clear() {
	for event := range *events {
		delete(*events, event)
	}
}

func (b *EventBox) Peak(event EventType) bool {
	b.cond.L.Lock()
	defer b.cond.L.Unlock()
	_, ok := b.events[event]
	return ok
}
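A minimal sketch of how EventBox is meant to be used (not part of this commit; `demoEventBox` is a made-up name): one goroutine blocks in Wait until another publishes an event with Set, mirroring the reader/matcher/terminal coordination loop in core.go earlier in this diff.

```go
package fzf

import "fmt"

// demoEventBox is an illustrative sketch, not part of the commit.
func demoEventBox() {
	eb := NewEventBox()
	done := make(chan struct{})

	go func() {
		eb.Wait(func(events *Events) {
			for evt := range *events {
				fmt.Println("received event type:", evt)
			}
			events.Clear() // consumed events are cleared inside the Wait callback
		})
		close(done)
	}()

	eb.Set(EVT_READ_FIN, nil) // wakes up the waiting goroutine
	<-done
}
```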
src/fzf/main.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package main

import "github.com/junegunn/fzf/src"

func main() {
	fzf.Run(fzf.ParseOptions())
}
src/item.go (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type Offset [2]int
|
||||
|
||||
type Item struct {
|
||||
text *string
|
||||
origText *string
|
||||
offsets []Offset
|
||||
index int
|
||||
rank Rank
|
||||
transformed *Transformed
|
||||
}
|
||||
|
||||
type Rank [3]int
|
||||
|
||||
var NilRank = Rank{-1, 0, 0}
|
||||
|
||||
func (i *Item) Rank() Rank {
|
||||
if i.rank[0] > 0 {
|
||||
return i.rank
|
||||
}
|
||||
sort.Sort(ByOrder(i.offsets))
|
||||
matchlen := 0
|
||||
prevEnd := 0
|
||||
for _, offset := range i.offsets {
|
||||
begin := offset[0]
|
||||
end := offset[1]
|
||||
if prevEnd > begin {
|
||||
begin = prevEnd
|
||||
}
|
||||
if end > prevEnd {
|
||||
prevEnd = end
|
||||
}
|
||||
if end > begin {
|
||||
matchlen += end - begin
|
||||
}
|
||||
}
|
||||
i.rank = Rank{matchlen, len(*i.text), i.index}
|
||||
return i.rank
|
||||
}
|
||||
|
||||
func (i *Item) Print() {
|
||||
if i.origText != nil {
|
||||
fmt.Println(*i.origText)
|
||||
} else {
|
||||
fmt.Println(*i.text)
|
||||
}
|
||||
}
|
||||
|
||||
type ByOrder []Offset
|
||||
|
||||
func (a ByOrder) Len() int {
|
||||
return len(a)
|
||||
}
|
||||
|
||||
func (a ByOrder) Swap(i, j int) {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
|
||||
func (a ByOrder) Less(i, j int) bool {
|
||||
ioff := a[i]
|
||||
joff := a[j]
|
||||
return (ioff[0] < joff[0]) || (ioff[0] == joff[0]) && (ioff[1] <= joff[1])
|
||||
}
|
||||
|
||||
type ByRelevance []*Item
|
||||
|
||||
func (a ByRelevance) Len() int {
|
||||
return len(a)
|
||||
}
|
||||
|
||||
func (a ByRelevance) Swap(i, j int) {
|
||||
a[i], a[j] = a[j], a[i]
|
||||
}
|
||||
|
||||
func (a ByRelevance) Less(i, j int) bool {
|
||||
irank := a[i].Rank()
|
||||
jrank := a[j].Rank()
|
||||
|
||||
return compareRanks(irank, jrank)
|
||||
}
|
||||
|
||||
func compareRanks(irank Rank, jrank Rank) bool {
|
||||
for idx := range irank {
|
||||
if irank[idx] < jrank[idx] {
|
||||
return true
|
||||
} else if irank[idx] > jrank[idx] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func SortMerge(partialResults [][]*Item) []*Item {
|
||||
if len(partialResults) == 1 {
|
||||
return partialResults[0]
|
||||
}
|
||||
|
||||
merged := []*Item{}
|
||||
|
||||
for len(partialResults) > 0 {
|
||||
minRank := Rank{0, 0, 0}
|
||||
minIdx := -1
|
||||
|
||||
for idx, partialResult := range partialResults {
|
||||
if len(partialResult) > 0 {
|
||||
rank := partialResult[0].Rank()
|
||||
if minIdx < 0 || compareRanks(rank, minRank) {
|
||||
minRank = rank
|
||||
minIdx = idx
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if minIdx >= 0 {
|
||||
merged = append(merged, partialResults[minIdx][0])
|
||||
partialResults[minIdx] = partialResults[minIdx][1:]
|
||||
}
|
||||
|
||||
nonEmptyPartialResults := make([][]*Item, 0, len(partialResults))
|
||||
for _, partialResult := range partialResults {
|
||||
if len(partialResult) > 0 {
|
||||
nonEmptyPartialResults = append(nonEmptyPartialResults, partialResult)
|
||||
}
|
||||
}
|
||||
partialResults = nonEmptyPartialResults
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
src/item_test.go (new file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestOffsetSort(t *testing.T) {
|
||||
offsets := []Offset{
|
||||
Offset{3, 5}, Offset{2, 7},
|
||||
Offset{1, 3}, Offset{2, 9}}
|
||||
sort.Sort(ByOrder(offsets))
|
||||
|
||||
if offsets[0][0] != 1 || offsets[0][1] != 3 ||
|
||||
offsets[1][0] != 2 || offsets[1][1] != 7 ||
|
||||
offsets[2][0] != 2 || offsets[2][1] != 9 ||
|
||||
offsets[3][0] != 3 || offsets[3][1] != 5 {
|
||||
t.Error("Invalid order:", offsets)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRankComparison(t *testing.T) {
|
||||
if compareRanks(Rank{3, 0, 5}, Rank{2, 0, 7}) ||
|
||||
!compareRanks(Rank{3, 0, 5}, Rank{3, 0, 6}) ||
|
||||
!compareRanks(Rank{1, 2, 3}, Rank{1, 3, 2}) ||
|
||||
!compareRanks(NilRank, Rank{0, 0, 0}) ||
|
||||
compareRanks(Rank{0, 0, 0}, NilRank) {
|
||||
t.Error("Invalid order")
|
||||
}
|
||||
}
|
||||
|
||||
// Match length, string length, index
|
||||
func TestItemRank(t *testing.T) {
|
||||
strs := []string{"foo", "foobar", "bar", "baz"}
|
||||
item1 := Item{text: &strs[0], index: 1, offsets: []Offset{}}
|
||||
rank1 := item1.Rank()
|
||||
if rank1[0] != 0 || rank1[1] != 3 || rank1[2] != 1 {
|
||||
t.Error(item1.Rank())
|
||||
}
|
||||
// Only differ in index
|
||||
item2 := Item{text: &strs[0], index: 0, offsets: []Offset{}}
|
||||
|
||||
items := []*Item{&item1, &item2}
|
||||
sort.Sort(ByRelevance(items))
|
||||
if items[0] != &item2 || items[1] != &item1 {
|
||||
t.Error(items)
|
||||
}
|
||||
|
||||
items = []*Item{&item2, &item1, &item1, &item2}
|
||||
sort.Sort(ByRelevance(items))
|
||||
if items[0] != &item2 || items[1] != &item2 ||
|
||||
items[2] != &item1 || items[3] != &item1 {
|
||||
t.Error(items)
|
||||
}
|
||||
|
||||
// Sort by relevance
|
||||
item3 := Item{text: &strs[1], index: 2, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
|
||||
item4 := Item{text: &strs[1], index: 2, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
|
||||
item5 := Item{text: &strs[2], index: 2, offsets: []Offset{Offset{1, 3}, Offset{5, 7}}}
|
||||
item6 := Item{text: &strs[2], index: 2, offsets: []Offset{Offset{1, 2}, Offset{6, 7}}}
|
||||
items = []*Item{&item1, &item2, &item3, &item4, &item5, &item6}
|
||||
sort.Sort(ByRelevance(items))
|
||||
if items[0] != &item2 || items[1] != &item1 ||
|
||||
items[2] != &item6 || items[3] != &item4 ||
|
||||
items[4] != &item5 || items[5] != &item3 {
|
||||
t.Error(items)
|
||||
}
|
||||
|
||||
// Sort merged lists
|
||||
lists := [][]*Item{
|
||||
[]*Item{&item2, &item4, &item5}, []*Item{&item1, &item6}, []*Item{&item3}}
|
||||
items = SortMerge(lists)
|
||||
if items[0] != &item2 || items[1] != &item1 ||
|
||||
items[2] != &item6 || items[3] != &item4 ||
|
||||
items[4] != &item5 || items[5] != &item3 {
|
||||
t.Error(items)
|
||||
}
|
||||
}
|
src/matcher.go (new file, 215 lines)
@@ -0,0 +1,215 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
type MatchRequest struct {
|
||||
chunks []*Chunk
|
||||
pattern *Pattern
|
||||
}
|
||||
|
||||
type Matcher struct {
|
||||
patternBuilder func([]rune) *Pattern
|
||||
sort bool
|
||||
eventBox *EventBox
|
||||
reqBox *EventBox
|
||||
partitions int
|
||||
queryCache QueryCache
|
||||
}
|
||||
|
||||
const (
|
||||
REQ_RETRY EventType = iota
|
||||
REQ_RESET
|
||||
)
|
||||
|
||||
const (
|
||||
STAT_CANCELLED int = iota
|
||||
STAT_QCH
|
||||
STAT_CHUNKS
|
||||
)
|
||||
|
||||
const (
|
||||
PROGRESS_MIN_DURATION = 200 * time.Millisecond
|
||||
)
|
||||
|
||||
func NewMatcher(patternBuilder func([]rune) *Pattern,
|
||||
sort bool, eventBox *EventBox) *Matcher {
|
||||
return &Matcher{
|
||||
patternBuilder: patternBuilder,
|
||||
sort: sort,
|
||||
eventBox: eventBox,
|
||||
reqBox: NewEventBox(),
|
||||
partitions: runtime.NumCPU(),
|
||||
queryCache: make(QueryCache)}
|
||||
}
|
||||
|
||||
func (m *Matcher) Loop() {
|
||||
prevCount := 0
|
||||
|
||||
for {
|
||||
var request MatchRequest
|
||||
|
||||
m.reqBox.Wait(func(events *Events) {
|
||||
for _, val := range *events {
|
||||
switch val := val.(type) {
|
||||
case MatchRequest:
|
||||
request = val
|
||||
default:
|
||||
panic(fmt.Sprintf("Unexpected type: %T", val))
|
||||
}
|
||||
}
|
||||
events.Clear()
|
||||
})
|
||||
|
||||
// Restart search
|
||||
patternString := request.pattern.AsString()
|
||||
allMatches := []*Item{}
|
||||
cancelled := false
|
||||
count := CountItems(request.chunks)
|
||||
|
||||
foundCache := false
|
||||
if count == prevCount {
|
||||
// Look up queryCache
|
||||
if cached, found := m.queryCache[patternString]; found {
|
||||
foundCache = true
|
||||
allMatches = cached
|
||||
}
|
||||
} else {
|
||||
// Invalidate queryCache
|
||||
prevCount = count
|
||||
m.queryCache = make(QueryCache)
|
||||
}
|
||||
|
||||
if !foundCache {
|
||||
allMatches, cancelled = m.scan(request, 0)
|
||||
}
|
||||
|
||||
if !cancelled {
|
||||
m.queryCache[patternString] = allMatches
|
||||
m.eventBox.Set(EVT_SEARCH_FIN, allMatches)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Matcher) sliceChunks(chunks []*Chunk) [][]*Chunk {
|
||||
perSlice := len(chunks) / m.partitions
|
||||
|
||||
// No need to parallelize
|
||||
if perSlice == 0 {
|
||||
return [][]*Chunk{chunks}
|
||||
}
|
||||
|
||||
slices := make([][]*Chunk, m.partitions)
|
||||
for i := 0; i < m.partitions; i++ {
|
||||
start := i * perSlice
|
||||
end := start + perSlice
|
||||
if i == m.partitions-1 {
|
||||
end = len(chunks)
|
||||
}
|
||||
slices[i] = chunks[start:end]
|
||||
}
|
||||
return slices
|
||||
}
|
||||
|
||||
type partialResult struct {
|
||||
index int
|
||||
matches []*Item
|
||||
}
|
||||
|
||||
func (m *Matcher) scan(request MatchRequest, limit int) ([]*Item, bool) {
|
||||
startedAt := time.Now()
|
||||
|
||||
numChunks := len(request.chunks)
|
||||
if numChunks == 0 {
|
||||
return []*Item{}, false
|
||||
}
|
||||
pattern := request.pattern
|
||||
empty := pattern.IsEmpty()
|
||||
cancelled := NewAtomicBool(false)
|
||||
|
||||
slices := m.sliceChunks(request.chunks)
|
||||
numSlices := len(slices)
|
||||
resultChan := make(chan partialResult, numSlices)
|
||||
countChan := make(chan int, numSlices)
|
||||
|
||||
for idx, chunks := range slices {
|
||||
go func(idx int, chunks []*Chunk) {
|
||||
sliceMatches := []*Item{}
|
||||
for _, chunk := range chunks {
|
||||
var matches []*Item
|
||||
if empty {
|
||||
matches = *chunk
|
||||
} else {
|
||||
matches = request.pattern.Match(chunk)
|
||||
}
|
||||
sliceMatches = append(sliceMatches, matches...)
|
||||
if cancelled.Get() {
|
||||
return
|
||||
}
|
||||
countChan <- len(sliceMatches)
|
||||
}
|
||||
if !empty && m.sort {
|
||||
sort.Sort(ByRelevance(sliceMatches))
|
||||
}
|
||||
resultChan <- partialResult{idx, sliceMatches}
|
||||
}(idx, chunks)
|
||||
}
|
||||
|
||||
count := 0
|
||||
matchCount := 0
|
||||
for matchesInChunk := range countChan {
|
||||
count += 1
|
||||
matchCount += matchesInChunk
|
||||
|
||||
if limit > 0 && matchCount > limit {
|
||||
return nil, true // For --select-1 and --exit-0
|
||||
}
|
||||
|
||||
if count == numChunks {
|
||||
break
|
||||
}
|
||||
|
||||
if !empty && m.reqBox.Peak(REQ_RESET) {
|
||||
cancelled.Set(true)
|
||||
return nil, true
|
||||
}
|
||||
|
||||
if time.Now().Sub(startedAt) > PROGRESS_MIN_DURATION {
|
||||
m.eventBox.Set(EVT_SEARCH_PROGRESS, float32(count)/float32(numChunks))
|
||||
}
|
||||
}
|
||||
|
||||
partialResults := make([][]*Item, numSlices)
|
||||
for range slices {
|
||||
partialResult := <-resultChan
|
||||
partialResults[partialResult.index] = partialResult.matches
|
||||
}
|
||||
|
||||
var allMatches []*Item
|
||||
if empty || !m.sort {
|
||||
allMatches = []*Item{}
|
||||
for _, matches := range partialResults {
|
||||
allMatches = append(allMatches, matches...)
|
||||
}
|
||||
} else {
|
||||
allMatches = SortMerge(partialResults)
|
||||
}
|
||||
|
||||
return allMatches, false
|
||||
}
|
||||
|
||||
func (m *Matcher) Reset(chunks []*Chunk, patternRunes []rune, cancel bool) {
|
||||
pattern := m.patternBuilder(patternRunes)
|
||||
|
||||
var event EventType
|
||||
if cancel {
|
||||
event = REQ_RESET
|
||||
} else {
|
||||
event = REQ_RETRY
|
||||
}
|
||||
m.reqBox.Set(event, MatchRequest{chunks, pattern})
|
||||
}
|
src/options.go (new file, 276 lines)
@@ -0,0 +1,276 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/junegunn/go-shellwords"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const USAGE = `usage: fzf [options]
|
||||
|
||||
Search
|
||||
-x, --extended Extended-search mode
|
||||
-e, --extended-exact Extended-search mode (exact match)
|
||||
-i Case-insensitive match (default: smart-case match)
|
||||
+i Case-sensitive match
|
||||
-n, --nth=N[,..] Comma-separated list of field index expressions
|
||||
for limiting search scope. Each can be a non-zero
|
||||
integer or a range expression ([BEGIN]..[END])
|
||||
--with-nth=N[,..] Transform the item using index expressions for search
|
||||
-d, --delimiter=STR Field delimiter regex for --nth (default: AWK-style)
|
||||
|
||||
Search result
|
||||
-s, --sort Sort the result
|
||||
+s, --no-sort Do not sort the result. Keep the sequence unchanged.
|
||||
|
||||
Interface
|
||||
-m, --multi Enable multi-select with tab/shift-tab
|
||||
--no-mouse Disable mouse
|
||||
+c, --no-color Disable colors
|
||||
+2, --no-256 Disable 256-color
|
||||
--black Use black background
|
||||
--reverse Reverse orientation
|
||||
--prompt=STR Input prompt (default: '> ')
|
||||
|
||||
Scripting
|
||||
-q, --query=STR Start the finder with the given query
|
||||
-1, --select-1 Automatically select the only match
|
||||
-0, --exit-0 Exit immediately when there's no match
|
||||
-f, --filter=STR Filter mode. Do not start interactive finder.
|
||||
--print-query Print query as the first line
|
||||
|
||||
Environment variables
|
||||
FZF_DEFAULT_COMMAND Default command to use when input is tty
|
||||
FZF_DEFAULT_OPTS Defaults options. (e.g. "-x -m")
|
||||
|
||||
`
|
||||
|
||||
type Mode int
|
||||
|
||||
const (
|
||||
MODE_FUZZY Mode = iota
|
||||
MODE_EXTENDED
|
||||
MODE_EXTENDED_EXACT
|
||||
)
|
||||
|
||||
type Case int
|
||||
|
||||
const (
|
||||
CASE_SMART Case = iota
|
||||
CASE_IGNORE
|
||||
CASE_RESPECT
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Mode Mode
|
||||
Case Case
|
||||
Nth []Range
|
||||
WithNth []Range
|
||||
Delimiter *regexp.Regexp
|
||||
Sort int
|
||||
Multi bool
|
||||
Mouse bool
|
||||
Color bool
|
||||
Color256 bool
|
||||
Black bool
|
||||
Reverse bool
|
||||
Prompt string
|
||||
Query string
|
||||
Select1 bool
|
||||
Exit0 bool
|
||||
Filter *string
|
||||
PrintQuery bool
|
||||
Version bool
|
||||
}
|
||||
|
||||
func DefaultOptions() *Options {
|
||||
return &Options{
|
||||
Mode: MODE_FUZZY,
|
||||
Case: CASE_SMART,
|
||||
Nth: make([]Range, 0),
|
||||
WithNth: make([]Range, 0),
|
||||
Delimiter: nil,
|
||||
Sort: 1000,
|
||||
Multi: false,
|
||||
Mouse: true,
|
||||
Color: true,
|
||||
Color256: strings.Contains(os.Getenv("TERM"), "256"),
|
||||
Black: false,
|
||||
Reverse: false,
|
||||
Prompt: "> ",
|
||||
Query: "",
|
||||
Select1: false,
|
||||
Exit0: false,
|
||||
Filter: nil,
|
||||
PrintQuery: false,
|
||||
Version: false}
|
||||
}
|
||||
|
||||
func help(ok int) {
|
||||
os.Stderr.WriteString(USAGE)
|
||||
os.Exit(ok)
|
||||
}
|
||||
|
||||
func errorExit(msg string) {
|
||||
os.Stderr.WriteString(msg + "\n")
|
||||
help(1)
|
||||
}
|
||||
|
||||
func optString(arg string, prefix string) (bool, string) {
|
||||
rx, _ := regexp.Compile(fmt.Sprintf("^(?:%s)(.*)$", prefix))
|
||||
matches := rx.FindStringSubmatch(arg)
|
||||
if len(matches) > 1 {
|
||||
return true, matches[1]
|
||||
} else {
|
||||
return false, ""
|
||||
}
|
||||
}
|
||||
|
||||
func nextString(args []string, i *int, message string) string {
|
||||
if len(args) > *i+1 {
|
||||
*i++
|
||||
} else {
|
||||
errorExit(message)
|
||||
}
|
||||
return args[*i]
|
||||
}
|
||||
|
||||
func optionalNumeric(args []string, i *int) int {
|
||||
if len(args) > *i+1 {
|
||||
if strings.IndexAny(args[*i+1], "0123456789") == 0 {
|
||||
*i++
|
||||
}
|
||||
}
|
||||
return 1 // Don't care
|
||||
}
|
||||
|
||||
func splitNth(str string) []Range {
|
||||
if match, _ := regexp.MatchString("^[0-9,-.]+$", str); !match {
|
||||
errorExit("invalid format: " + str)
|
||||
}
|
||||
|
||||
tokens := strings.Split(str, ",")
|
||||
ranges := make([]Range, len(tokens))
|
||||
for idx, s := range tokens {
|
||||
r, ok := ParseRange(&s)
|
||||
if !ok {
|
||||
errorExit("invalid format: " + str)
|
||||
}
|
||||
ranges[idx] = r
|
||||
}
|
||||
return ranges
|
||||
}
|
||||
|
||||
func delimiterRegexp(str string) *regexp.Regexp {
|
||||
rx, e := regexp.Compile(str)
|
||||
if e != nil {
|
||||
str = regexp.QuoteMeta(str)
|
||||
}
|
||||
|
||||
rx, e = regexp.Compile(fmt.Sprintf("(?:.*?%s)|(?:.+?$)", str))
|
||||
if e != nil {
|
||||
errorExit("invalid regular expression: " + e.Error())
|
||||
}
|
||||
return rx
|
||||
}
|
||||
|
||||
func parseOptions(opts *Options, allArgs []string) {
|
||||
for i := 0; i < len(allArgs); i++ {
|
||||
arg := allArgs[i]
|
||||
switch arg {
|
||||
case "-h", "--help":
|
||||
help(0)
|
||||
case "-x", "--extended":
|
||||
opts.Mode = MODE_EXTENDED
|
||||
case "-e", "--extended-exact":
|
||||
opts.Mode = MODE_EXTENDED_EXACT
|
||||
case "+x", "--no-extended", "+e", "--no-extended-exact":
|
||||
opts.Mode = MODE_FUZZY
|
||||
case "-q", "--query":
|
||||
opts.Query = nextString(allArgs, &i, "query string required")
|
||||
case "-f", "--filter":
|
||||
filter := nextString(allArgs, &i, "query string required")
|
||||
opts.Filter = &filter
|
||||
case "-d", "--delimiter":
|
||||
opts.Delimiter = delimiterRegexp(nextString(allArgs, &i, "delimiter required"))
|
||||
case "-n", "--nth":
|
||||
opts.Nth = splitNth(nextString(allArgs, &i, "nth expression required"))
|
||||
case "--with-nth":
|
||||
opts.WithNth = splitNth(nextString(allArgs, &i, "nth expression required"))
|
||||
case "-s", "--sort":
|
||||
opts.Sort = optionalNumeric(allArgs, &i)
|
||||
case "+s", "--no-sort":
|
||||
opts.Sort = 0
|
||||
case "-i":
|
||||
opts.Case = CASE_IGNORE
|
||||
case "+i":
|
||||
opts.Case = CASE_RESPECT
|
||||
case "-m", "--multi":
|
||||
opts.Multi = true
|
||||
case "+m", "--no-multi":
|
||||
opts.Multi = false
|
||||
case "--no-mouse":
|
||||
opts.Mouse = false
|
||||
case "+c", "--no-color":
|
||||
opts.Color = false
|
||||
case "+2", "--no-256":
|
||||
opts.Color256 = false
|
||||
case "--black":
|
||||
opts.Black = true
|
||||
case "--no-black":
|
||||
opts.Black = false
|
||||
case "--reverse":
|
||||
opts.Reverse = true
|
||||
case "--no-reverse":
|
||||
opts.Reverse = false
|
||||
case "-1", "--select-1":
|
||||
opts.Select1 = true
|
||||
case "+1", "--no-select-1":
|
||||
opts.Select1 = false
|
||||
case "-0", "--exit-0":
|
||||
opts.Exit0 = true
|
||||
case "+0", "--no-exit-0":
|
||||
opts.Exit0 = false
|
||||
case "--print-query":
|
||||
opts.PrintQuery = true
|
||||
case "--no-print-query":
|
||||
opts.PrintQuery = false
|
||||
case "--prompt":
|
||||
opts.Prompt = nextString(allArgs, &i, "prompt string required")
|
||||
case "--version":
|
||||
opts.Version = true
|
||||
default:
|
||||
if match, value := optString(arg, "-q|--query="); match {
|
||||
opts.Query = value
|
||||
} else if match, value := optString(arg, "-f|--filter="); match {
|
||||
opts.Filter = &value
|
||||
} else if match, value := optString(arg, "-d|--delimiter="); match {
|
||||
opts.Delimiter = delimiterRegexp(value)
|
||||
} else if match, value := optString(arg, "--prompt="); match {
|
||||
opts.Prompt = value
|
||||
} else if match, value := optString(arg, "-n|--nth="); match {
|
||||
opts.Nth = splitNth(value)
|
||||
} else if match, value := optString(arg, "--with-nth="); match {
|
||||
opts.WithNth = splitNth(value)
|
||||
} else if match, _ := optString(arg, "-s|--sort="); match {
|
||||
opts.Sort = 1 // Don't care
|
||||
} else {
|
||||
errorExit("unknown option: " + arg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ParseOptions() *Options {
|
||||
opts := DefaultOptions()
|
||||
|
||||
// Options from the FZF_DEFAULT_OPTS environment variable
|
||||
words, _ := shellwords.Parse(os.Getenv("FZF_DEFAULT_OPTS"))
|
||||
parseOptions(opts, words)
|
||||
|
||||
// Options from command-line arguments
|
||||
parseOptions(opts, os.Args[1:])
|
||||
return opts
|
||||
}
|
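Below is a minimal usage sketch, not part of this commit, of how the option layers are meant to compose: defaults first, then FZF_DEFAULT_OPTS, then the command line, with later calls overriding earlier ones. The sketch function name is hypothetical; everything else refers to identifiers defined in src/options.go.

package fzf

// optionsSketch is an illustrative sketch only. It mimics what ParseOptions
// does internally: start from DefaultOptions, apply the environment options,
// then apply the command-line arguments on top.
func optionsSketch() *Options {
	opts := DefaultOptions()
	parseOptions(opts, []string{"--multi", "--reverse"}) // e.g. parsed from FZF_DEFAULT_OPTS
	parseOptions(opts, []string{"+m", "-q", "query"})    // command line wins: Multi=false, Query="query"
	return opts
}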
37
src/options_test.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package fzf
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestDelimiterRegex(t *testing.T) {
|
||||
rx := delimiterRegexp("*")
|
||||
tokens := rx.FindAllString("-*--*---**---", -1)
|
||||
if tokens[0] != "-*" || tokens[1] != "--*" || tokens[2] != "---*" ||
|
||||
tokens[3] != "*" || tokens[4] != "---" {
|
||||
t.Errorf("%s %s %d", rx, tokens, len(tokens))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitNth(t *testing.T) {
|
||||
{
|
||||
ranges := splitNth("..")
|
||||
if len(ranges) != 1 ||
|
||||
ranges[0].begin != RANGE_ELLIPSIS ||
|
||||
ranges[0].end != RANGE_ELLIPSIS {
|
||||
t.Errorf("%s", ranges)
|
||||
}
|
||||
}
|
||||
{
|
||||
ranges := splitNth("..3,1..,2..3,4..-1,-3..-2,..,2,-2")
|
||||
if len(ranges) != 8 ||
|
||||
ranges[0].begin != RANGE_ELLIPSIS || ranges[0].end != 3 ||
|
||||
ranges[1].begin != 1 || ranges[1].end != RANGE_ELLIPSIS ||
|
||||
ranges[2].begin != 2 || ranges[2].end != 3 ||
|
||||
ranges[3].begin != 4 || ranges[3].end != -1 ||
|
||||
ranges[4].begin != -3 || ranges[4].end != -2 ||
|
||||
ranges[5].begin != RANGE_ELLIPSIS || ranges[5].end != RANGE_ELLIPSIS ||
|
||||
ranges[6].begin != 2 || ranges[6].end != 2 ||
|
||||
ranges[7].begin != -2 || ranges[7].end != -2 {
|
||||
t.Errorf("%s", ranges)
|
||||
}
|
||||
}
|
||||
}
|
305
src/pattern.go
Normal file
@@ -0,0 +1,305 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const UPPERCASE = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
// fuzzy
|
||||
// 'exact
|
||||
// ^exact-prefix
|
||||
// exact-suffix$
|
||||
// !not-fuzzy
|
||||
// !'not-exact
|
||||
// !^not-exact-prefix
|
||||
// !not-exact-suffix$
|
||||
|
||||
type TermType int
|
||||
|
||||
const (
|
||||
TERM_FUZZY TermType = iota
|
||||
TERM_EXACT
|
||||
TERM_PREFIX
|
||||
TERM_SUFFIX
|
||||
)
|
||||
|
||||
type Term struct {
|
||||
typ TermType
|
||||
inv bool
|
||||
text []rune
|
||||
origText []rune
|
||||
}
|
||||
|
||||
type Pattern struct {
|
||||
mode Mode
|
||||
caseSensitive bool
|
||||
text []rune
|
||||
terms []Term
|
||||
hasInvTerm bool
|
||||
delimiter *regexp.Regexp
|
||||
nth []Range
|
||||
procFun map[TermType]func(bool, *string, []rune) (int, int)
|
||||
}
|
||||
|
||||
var (
|
||||
_patternCache map[string]*Pattern
|
||||
_splitRegex *regexp.Regexp
|
||||
_cache ChunkCache
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We can uniquely identify the pattern for a given string since
|
||||
// mode and caseMode do not change while the program is running
|
||||
_patternCache = make(map[string]*Pattern)
|
||||
_splitRegex = regexp.MustCompile("\\s+")
|
||||
_cache = NewChunkCache()
|
||||
}
|
||||
|
||||
func clearPatternCache() {
|
||||
_patternCache = make(map[string]*Pattern)
|
||||
}
|
||||
|
||||
func BuildPattern(mode Mode, caseMode Case,
|
||||
nth []Range, delimiter *regexp.Regexp, runes []rune) *Pattern {
|
||||
|
||||
var asString string
|
||||
switch mode {
|
||||
case MODE_EXTENDED, MODE_EXTENDED_EXACT:
|
||||
asString = strings.Trim(string(runes), " ")
|
||||
default:
|
||||
asString = string(runes)
|
||||
}
|
||||
|
||||
cached, found := _patternCache[asString]
|
||||
if found {
|
||||
return cached
|
||||
}
|
||||
|
||||
caseSensitive, hasInvTerm := true, false
|
||||
terms := []Term{}
|
||||
|
||||
switch caseMode {
|
||||
case CASE_SMART:
|
||||
if !strings.ContainsAny(asString, UPPERCASE) {
|
||||
runes, caseSensitive = []rune(strings.ToLower(asString)), false
|
||||
}
|
||||
case CASE_IGNORE:
|
||||
runes, caseSensitive = []rune(strings.ToLower(asString)), false
|
||||
}
|
||||
|
||||
switch mode {
|
||||
case MODE_EXTENDED, MODE_EXTENDED_EXACT:
|
||||
terms = parseTerms(mode, string(runes))
|
||||
for _, term := range terms {
|
||||
if term.inv {
|
||||
hasInvTerm = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ptr := &Pattern{
|
||||
mode: mode,
|
||||
caseSensitive: caseSensitive,
|
||||
text: runes,
|
||||
terms: terms,
|
||||
hasInvTerm: hasInvTerm,
|
||||
nth: nth,
|
||||
delimiter: delimiter,
|
||||
procFun: make(map[TermType]func(bool, *string, []rune) (int, int))}
|
||||
|
||||
ptr.procFun[TERM_FUZZY] = FuzzyMatch
|
||||
ptr.procFun[TERM_EXACT] = ExactMatchNaive
|
||||
ptr.procFun[TERM_PREFIX] = PrefixMatch
|
||||
ptr.procFun[TERM_SUFFIX] = SuffixMatch
|
||||
|
||||
_patternCache[asString] = ptr
|
||||
return ptr
|
||||
}
|
||||
|
||||
func parseTerms(mode Mode, str string) []Term {
|
||||
tokens := _splitRegex.Split(str, -1)
|
||||
terms := []Term{}
|
||||
for _, token := range tokens {
|
||||
typ, inv, text := TERM_FUZZY, false, token
|
||||
origText := []rune(text)
|
||||
if mode == MODE_EXTENDED_EXACT {
|
||||
typ = TERM_EXACT
|
||||
}
|
||||
|
||||
if strings.HasPrefix(text, "!") {
|
||||
inv = true
|
||||
text = text[1:]
|
||||
}
|
||||
|
||||
if strings.HasPrefix(text, "'") {
|
||||
if mode == MODE_EXTENDED {
|
||||
typ = TERM_EXACT
|
||||
text = text[1:]
|
||||
}
|
||||
} else if strings.HasPrefix(text, "^") {
|
||||
typ = TERM_PREFIX
|
||||
text = text[1:]
|
||||
} else if strings.HasSuffix(text, "$") {
|
||||
typ = TERM_SUFFIX
|
||||
text = text[:len(text)-1]
|
||||
}
|
||||
|
||||
if len(text) > 0 {
|
||||
terms = append(terms, Term{
|
||||
typ: typ,
|
||||
inv: inv,
|
||||
text: []rune(text),
|
||||
origText: origText})
|
||||
}
|
||||
}
|
||||
return terms
|
||||
}
|
||||
|
||||
func (p *Pattern) IsEmpty() bool {
|
||||
if p.mode == MODE_FUZZY {
|
||||
return len(p.text) == 0
|
||||
} else {
|
||||
return len(p.terms) == 0
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pattern) AsString() string {
|
||||
return string(p.text)
|
||||
}
|
||||
|
||||
func (p *Pattern) CacheKey() string {
|
||||
if p.mode == MODE_FUZZY {
|
||||
return p.AsString()
|
||||
}
|
||||
cacheableTerms := []string{}
|
||||
for _, term := range p.terms {
|
||||
if term.inv {
|
||||
continue
|
||||
}
|
||||
cacheableTerms = append(cacheableTerms, string(term.origText))
|
||||
}
|
||||
sort.Strings(cacheableTerms)
|
||||
return strings.Join(cacheableTerms, " ")
|
||||
}
|
||||
|
||||
func (p *Pattern) Match(chunk *Chunk) []*Item {
|
||||
space := chunk
|
||||
|
||||
// ChunkCache: Exact match
|
||||
cacheKey := p.CacheKey()
|
||||
if !p.hasInvTerm { // Because we're excluding Inv-term from cache key
|
||||
if cached, found := _cache.Find(chunk, cacheKey); found {
|
||||
return cached
|
||||
}
|
||||
}
|
||||
|
||||
// ChunkCache: Prefix match
|
||||
foundPrefixCache := false
|
||||
for idx := len(cacheKey) - 1; idx > 0; idx-- {
|
||||
if cached, found := _cache.Find(chunk, cacheKey[:idx]); found {
|
||||
cachedChunk := Chunk(cached)
|
||||
space = &cachedChunk
|
||||
foundPrefixCache = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// ChunkCache: Suffix match
|
||||
if !foundPrefixCache {
|
||||
for idx := 1; idx < len(cacheKey); idx++ {
|
||||
if cached, found := _cache.Find(chunk, cacheKey[idx:]); found {
|
||||
cachedChunk := Chunk(cached)
|
||||
space = &cachedChunk
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var matches []*Item
|
||||
if p.mode == MODE_FUZZY {
|
||||
matches = p.fuzzyMatch(space)
|
||||
} else {
|
||||
matches = p.extendedMatch(space)
|
||||
}
|
||||
|
||||
if !p.hasInvTerm {
|
||||
_cache.Add(chunk, cacheKey, matches)
|
||||
}
|
||||
return matches
|
||||
}
|
||||
|
||||
func (p *Pattern) fuzzyMatch(chunk *Chunk) []*Item {
|
||||
matches := []*Item{}
|
||||
for _, item := range *chunk {
|
||||
input := p.prepareInput(item)
|
||||
if sidx, eidx := p.iter(FuzzyMatch, input, p.text); sidx >= 0 {
|
||||
matches = append(matches, &Item{
|
||||
text: item.text,
|
||||
index: item.index,
|
||||
offsets: []Offset{Offset{sidx, eidx}},
|
||||
rank: NilRank})
|
||||
}
|
||||
}
|
||||
return matches
|
||||
}
|
||||
|
||||
func (p *Pattern) extendedMatch(chunk *Chunk) []*Item {
|
||||
matches := []*Item{}
|
||||
for _, item := range *chunk {
|
||||
input := p.prepareInput(item)
|
||||
offsets := []Offset{}
|
||||
Loop:
|
||||
for _, term := range p.terms {
|
||||
pfun := p.procFun[term.typ]
|
||||
if sidx, eidx := p.iter(pfun, input, term.text); sidx >= 0 {
|
||||
if term.inv {
|
||||
break Loop
|
||||
}
|
||||
offsets = append(offsets, Offset{sidx, eidx})
|
||||
} else if term.inv {
|
||||
offsets = append(offsets, Offset{0, 0})
|
||||
}
|
||||
}
|
||||
if len(offsets) == len(p.terms) {
|
||||
matches = append(matches, &Item{
|
||||
text: item.text,
|
||||
index: item.index,
|
||||
offsets: offsets,
|
||||
rank: NilRank})
|
||||
}
|
||||
}
|
||||
return matches
|
||||
}
|
||||
|
||||
func (p *Pattern) prepareInput(item *Item) *Transformed {
|
||||
if item.transformed != nil {
|
||||
return item.transformed
|
||||
}
|
||||
|
||||
var ret *Transformed
|
||||
if len(p.nth) > 0 {
|
||||
tokens := Tokenize(item.text, p.delimiter)
|
||||
ret = Transform(tokens, p.nth)
|
||||
} else {
|
||||
trans := Transformed{
|
||||
whole: item.text,
|
||||
parts: []Token{Token{text: item.text, prefixLength: 0}}}
|
||||
ret = &trans
|
||||
}
|
||||
item.transformed = ret
|
||||
return ret
|
||||
}
|
||||
|
||||
func (p *Pattern) iter(pfun func(bool, *string, []rune) (int, int),
|
||||
inputs *Transformed, pattern []rune) (int, int) {
|
||||
for _, part := range inputs.parts {
|
||||
prefixLength := part.prefixLength
|
||||
if sidx, eidx := pfun(p.caseSensitive, part.text, pattern); sidx >= 0 {
|
||||
return sidx + prefixLength, eidx + prefixLength
|
||||
}
|
||||
}
|
||||
return -1, -1
|
||||
}
|
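A minimal sketch, not part of this commit, of how a Pattern built from an extended-mode query is applied to a Chunk. The sketch function and its sample query are hypothetical; BuildPattern, Match and the related types come from the code above.

package fzf

// patternSketch is an illustrative sketch only. The query mixes the four term
// types: 'src (exact), ^go (prefix), .go$ (suffix) and !tmp (inverse fuzzy).
func patternSketch(chunk *Chunk) []*Item {
	pattern := BuildPattern(MODE_EXTENDED, CASE_SMART, []Range{}, nil,
		[]rune("'src ^go .go$ !tmp"))
	// Results are memoized per chunk in the ChunkCache unless an inverse
	// term is present (hasInvTerm), as seen in Match above.
	return pattern.Match(chunk)
}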
87
src/pattern_test.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package fzf
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestParseTermsExtended(t *testing.T) {
|
||||
terms := parseTerms(MODE_EXTENDED,
|
||||
"aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$")
|
||||
if len(terms) != 8 ||
|
||||
terms[0].typ != TERM_FUZZY || terms[0].inv ||
|
||||
terms[1].typ != TERM_EXACT || terms[1].inv ||
|
||||
terms[2].typ != TERM_PREFIX || terms[2].inv ||
|
||||
terms[3].typ != TERM_SUFFIX || terms[3].inv ||
|
||||
terms[4].typ != TERM_FUZZY || !terms[4].inv ||
|
||||
terms[5].typ != TERM_EXACT || !terms[5].inv ||
|
||||
terms[6].typ != TERM_PREFIX || !terms[6].inv ||
|
||||
terms[7].typ != TERM_SUFFIX || !terms[7].inv {
|
||||
t.Errorf("%s", terms)
|
||||
}
|
||||
for idx, term := range terms {
|
||||
if len(term.text) != 3 {
|
||||
t.Errorf("%s", term)
|
||||
}
|
||||
if idx > 0 && len(term.origText) != 4+idx/5 {
|
||||
t.Errorf("%s", term)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTermsExtendedExact(t *testing.T) {
|
||||
terms := parseTerms(MODE_EXTENDED_EXACT,
|
||||
"aaa 'bbb ^ccc ddd$ !eee !'fff !^ggg !hhh$")
|
||||
if len(terms) != 8 ||
|
||||
terms[0].typ != TERM_EXACT || terms[0].inv || len(terms[0].text) != 3 ||
|
||||
terms[1].typ != TERM_EXACT || terms[1].inv || len(terms[1].text) != 4 ||
|
||||
terms[2].typ != TERM_PREFIX || terms[2].inv || len(terms[2].text) != 3 ||
|
||||
terms[3].typ != TERM_SUFFIX || terms[3].inv || len(terms[3].text) != 3 ||
|
||||
terms[4].typ != TERM_EXACT || !terms[4].inv || len(terms[4].text) != 3 ||
|
||||
terms[5].typ != TERM_EXACT || !terms[5].inv || len(terms[5].text) != 4 ||
|
||||
terms[6].typ != TERM_PREFIX || !terms[6].inv || len(terms[6].text) != 3 ||
|
||||
terms[7].typ != TERM_SUFFIX || !terms[7].inv || len(terms[7].text) != 3 {
|
||||
t.Errorf("%s", terms)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTermsEmpty(t *testing.T) {
|
||||
terms := parseTerms(MODE_EXTENDED, "' $ ^ !' !^ !$")
|
||||
if len(terms) != 0 {
|
||||
t.Errorf("%s", terms)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExact(t *testing.T) {
|
||||
defer clearPatternCache()
|
||||
clearPatternCache()
|
||||
pattern := BuildPattern(MODE_EXTENDED, CASE_SMART,
|
||||
[]Range{}, nil, []rune("'abc"))
|
||||
str := "aabbcc abc"
|
||||
sidx, eidx := ExactMatchNaive(pattern.caseSensitive, &str, pattern.terms[0].text)
|
||||
if sidx != 7 || eidx != 10 {
|
||||
t.Errorf("%s / %d / %d", pattern.terms, sidx, eidx)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCaseSensitivity(t *testing.T) {
|
||||
defer clearPatternCache()
|
||||
clearPatternCache()
|
||||
pat1 := BuildPattern(MODE_FUZZY, CASE_SMART, []Range{}, nil, []rune("abc"))
|
||||
clearPatternCache()
|
||||
pat2 := BuildPattern(MODE_FUZZY, CASE_SMART, []Range{}, nil, []rune("Abc"))
|
||||
clearPatternCache()
|
||||
pat3 := BuildPattern(MODE_FUZZY, CASE_IGNORE, []Range{}, nil, []rune("abc"))
|
||||
clearPatternCache()
|
||||
pat4 := BuildPattern(MODE_FUZZY, CASE_IGNORE, []Range{}, nil, []rune("Abc"))
|
||||
clearPatternCache()
|
||||
pat5 := BuildPattern(MODE_FUZZY, CASE_RESPECT, []Range{}, nil, []rune("abc"))
|
||||
clearPatternCache()
|
||||
pat6 := BuildPattern(MODE_FUZZY, CASE_RESPECT, []Range{}, nil, []rune("Abc"))
|
||||
|
||||
if string(pat1.text) != "abc" || pat1.caseSensitive != false ||
|
||||
string(pat2.text) != "Abc" || pat2.caseSensitive != true ||
|
||||
string(pat3.text) != "abc" || pat3.caseSensitive != false ||
|
||||
string(pat4.text) != "abc" || pat4.caseSensitive != false ||
|
||||
string(pat5.text) != "abc" || pat5.caseSensitive != true ||
|
||||
string(pat6.text) != "Abc" || pat6.caseSensitive != true {
|
||||
t.Error("Invalid case conversion")
|
||||
}
|
||||
}
|
60
src/reader.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package fzf
|
||||
|
||||
// #include <unistd.h>
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
const DEFAULT_COMMAND = "find * -path '*/\\.*' -prune -o -type f -print -o -type l -print 2> /dev/null"
|
||||
|
||||
type Reader struct {
|
||||
pusher func(string)
|
||||
eventBox *EventBox
|
||||
}
|
||||
|
||||
func (r *Reader) ReadSource() {
|
||||
if int(C.isatty(C.int(os.Stdin.Fd()))) != 0 {
|
||||
cmd := os.Getenv("FZF_DEFAULT_COMMAND")
|
||||
if len(cmd) == 0 {
|
||||
cmd = DEFAULT_COMMAND
|
||||
}
|
||||
r.readFromCommand(cmd)
|
||||
} else {
|
||||
r.readFromStdin()
|
||||
}
|
||||
r.eventBox.Set(EVT_READ_FIN, nil)
|
||||
}
|
||||
|
||||
func (r *Reader) feed(src io.Reader) {
|
||||
if scanner := bufio.NewScanner(src); scanner != nil {
|
||||
for scanner.Scan() {
|
||||
r.pusher(scanner.Text())
|
||||
r.eventBox.Set(EVT_READ_NEW, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Reader) readFromStdin() {
|
||||
r.feed(os.Stdin)
|
||||
}
|
||||
|
||||
func (r *Reader) readFromCommand(cmd string) {
|
||||
arg := fmt.Sprintf("%q", cmd)
|
||||
listCommand := exec.Command("sh", "-c", arg[1:len(arg)-1])
|
||||
out, err := listCommand.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = listCommand.Start()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer listCommand.Wait()
|
||||
r.feed(out)
|
||||
}
|
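A minimal sketch, not part of this commit, of wiring a Reader to a consumer. The sketch function and the lines slice are hypothetical; the Reader fields and EventBox calls are the ones used above.

package fzf

// readerSketch is an illustrative sketch only. Each scanned line is handed to
// pusher and flagged with EVT_READ_NEW; EVT_READ_FIN is set once at the end.
func readerSketch() {
	eventBox := NewEventBox()
	lines := []string{}
	reader := Reader{
		pusher:   func(s string) { lines = append(lines, s) },
		eventBox: eventBox}
	go reader.ReadSource() // stdin when piped, otherwise $FZF_DEFAULT_COMMAND
}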
52
src/reader_test.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package fzf
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestReadFromCommand(t *testing.T) {
|
||||
strs := []string{}
|
||||
eb := NewEventBox()
|
||||
reader := Reader{
|
||||
pusher: func(s string) { strs = append(strs, s) },
|
||||
eventBox: eb}
|
||||
|
||||
// Check EventBox
|
||||
if eb.Peak(EVT_READ_NEW) {
|
||||
t.Error("EVT_READ_NEW should not be set yet")
|
||||
}
|
||||
|
||||
// Normal command
|
||||
reader.readFromCommand(`echo abc && echo def`)
|
||||
if len(strs) != 2 || strs[0] != "abc" || strs[1] != "def" {
|
||||
t.Errorf("%s", strs)
|
||||
}
|
||||
|
||||
// Check EventBox again
|
||||
if !eb.Peak(EVT_READ_NEW) {
|
||||
t.Error("EVT_READ_NEW should be set yet")
|
||||
}
|
||||
|
||||
// Wait should return immediately
|
||||
eb.Wait(func(events *Events) {
|
||||
if _, found := (*events)[EVT_READ_NEW]; !found {
|
||||
t.Errorf("%s", events)
|
||||
}
|
||||
events.Clear()
|
||||
})
|
||||
|
||||
// EventBox is cleared
|
||||
if eb.Peak(EVT_READ_NEW) {
|
||||
t.Error("EVT_READ_NEW should not be set yet")
|
||||
}
|
||||
|
||||
// Failing command
|
||||
strs = []string{}
|
||||
reader.readFromCommand(`no-such-command`)
|
||||
if len(strs) > 0 {
|
||||
t.Errorf("%s", strs)
|
||||
}
|
||||
|
||||
// Check EventBox again
|
||||
if eb.Peak(EVT_READ_NEW) {
|
||||
t.Error("Command failed. EVT_READ_NEW should be set")
|
||||
}
|
||||
}
|
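The test above leans on the EventBox Set/Peak/Wait/Clear protocol; here is a minimal sketch, not part of this commit, of that handshake in isolation (the sketch function name is hypothetical, the calls are the ones exercised in the test).

package fzf

// eventBoxSketch is an illustrative sketch only: a producer Sets an event,
// the consumer Waits, inspects the pending events, and Clears them so the
// next Set will wake Wait again.
func eventBoxSketch() {
	eb := NewEventBox()
	eb.Set(EVT_READ_NEW, nil) // normally done from the reader goroutine
	eb.Wait(func(events *Events) {
		if _, found := (*events)[EVT_READ_NEW]; found {
			// react to newly read lines here
		}
		events.Clear()
	})
}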
580
src/terminal.go
Normal file
@@ -0,0 +1,580 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
C "github.com/junegunn/fzf/src/curses"
|
||||
"github.com/junegunn/go-runewidth"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Terminal struct {
|
||||
prompt string
|
||||
reverse bool
|
||||
tac bool
|
||||
cx int
|
||||
cy int
|
||||
offset int
|
||||
yanked []rune
|
||||
input []rune
|
||||
multi bool
|
||||
printQuery bool
|
||||
count int
|
||||
progress int
|
||||
reading bool
|
||||
list []*Item
|
||||
selected map[*string]*string
|
||||
reqBox *EventBox
|
||||
eventBox *EventBox
|
||||
mutex sync.Mutex
|
||||
initFunc func()
|
||||
}
|
||||
|
||||
var _spinner []string = []string{`-`, `\`, `|`, `/`, `-`, `\`, `|`, `/`}
|
||||
|
||||
const (
|
||||
REQ_PROMPT EventType = iota
|
||||
REQ_INFO
|
||||
REQ_LIST
|
||||
REQ_REDRAW
|
||||
REQ_CLOSE
|
||||
REQ_QUIT
|
||||
)
|
||||
|
||||
func NewTerminal(opts *Options, eventBox *EventBox) *Terminal {
|
||||
input := []rune(opts.Query)
|
||||
return &Terminal{
|
||||
prompt: opts.Prompt,
|
||||
tac: opts.Sort == 0,
|
||||
reverse: opts.Reverse,
|
||||
cx: displayWidth(input),
|
||||
cy: 0,
|
||||
offset: 0,
|
||||
yanked: []rune{},
|
||||
input: input,
|
||||
multi: opts.Multi,
|
||||
printQuery: opts.PrintQuery,
|
||||
list: []*Item{},
|
||||
selected: make(map[*string]*string),
|
||||
reqBox: NewEventBox(),
|
||||
eventBox: eventBox,
|
||||
mutex: sync.Mutex{},
|
||||
initFunc: func() {
|
||||
C.Init(opts.Color, opts.Color256, opts.Black, opts.Mouse)
|
||||
}}
|
||||
}
|
||||
|
||||
func (t *Terminal) Input() []rune {
|
||||
t.mutex.Lock()
|
||||
defer t.mutex.Unlock()
|
||||
return copySlice(t.input)
|
||||
}
|
||||
|
||||
func (t *Terminal) UpdateCount(cnt int, final bool) {
|
||||
t.mutex.Lock()
|
||||
t.count = cnt
|
||||
t.reading = !final
|
||||
t.mutex.Unlock()
|
||||
t.reqBox.Set(REQ_INFO, nil)
|
||||
}
|
||||
|
||||
func (t *Terminal) UpdateProgress(progress float32) {
|
||||
t.mutex.Lock()
|
||||
t.progress = int(progress * 100)
|
||||
t.mutex.Unlock()
|
||||
t.reqBox.Set(REQ_INFO, nil)
|
||||
}
|
||||
|
||||
func (t *Terminal) UpdateList(list []*Item) {
|
||||
t.mutex.Lock()
|
||||
t.progress = 100
|
||||
t.list = list
|
||||
t.mutex.Unlock()
|
||||
t.reqBox.Set(REQ_INFO, nil)
|
||||
t.reqBox.Set(REQ_LIST, nil)
|
||||
}
|
||||
|
||||
func (t *Terminal) listIndex(y int) int {
|
||||
if t.tac {
|
||||
return len(t.list) - y - 1
|
||||
} else {
|
||||
return y
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) output() {
|
||||
if t.printQuery {
|
||||
fmt.Println(string(t.input))
|
||||
}
|
||||
if len(t.selected) == 0 {
|
||||
if len(t.list) > t.cy {
|
||||
t.list[t.listIndex(t.cy)].Print()
|
||||
}
|
||||
} else {
|
||||
for ptr, orig := range t.selected {
|
||||
if orig != nil {
|
||||
fmt.Println(*orig)
|
||||
} else {
|
||||
fmt.Println(*ptr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func displayWidth(runes []rune) int {
|
||||
l := 0
|
||||
for _, r := range runes {
|
||||
l += runewidth.RuneWidth(r)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (t *Terminal) move(y int, x int, clear bool) {
|
||||
maxy := C.MaxY()
|
||||
if !t.reverse {
|
||||
y = maxy - y - 1
|
||||
}
|
||||
|
||||
if clear {
|
||||
C.MoveAndClear(y, x)
|
||||
} else {
|
||||
C.Move(y, x)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) placeCursor() {
|
||||
t.move(0, len(t.prompt)+displayWidth(t.input[:t.cx]), false)
|
||||
}
|
||||
|
||||
func (t *Terminal) printPrompt() {
|
||||
t.move(0, 0, true)
|
||||
C.CPrint(C.COL_PROMPT, true, t.prompt)
|
||||
C.CPrint(C.COL_NORMAL, true, string(t.input))
|
||||
}
|
||||
|
||||
func (t *Terminal) printInfo() {
|
||||
t.move(1, 0, true)
|
||||
if t.reading {
|
||||
duration := int64(200) * int64(time.Millisecond)
|
||||
idx := (time.Now().UnixNano() % (duration * int64(len(_spinner)))) / duration
|
||||
C.CPrint(C.COL_SPINNER, true, _spinner[idx])
|
||||
}
|
||||
|
||||
t.move(1, 2, false)
|
||||
output := fmt.Sprintf("%d/%d", len(t.list), t.count)
|
||||
if t.multi && len(t.selected) > 0 {
|
||||
output += fmt.Sprintf(" (%d)", len(t.selected))
|
||||
}
|
||||
if t.progress > 0 && t.progress < 100 {
|
||||
output += fmt.Sprintf(" (%d%%)", t.progress)
|
||||
}
|
||||
C.CPrint(C.COL_INFO, false, output)
|
||||
}
|
||||
|
||||
func (t *Terminal) printList() {
|
||||
t.constrain()
|
||||
|
||||
maxy := maxItems()
|
||||
count := len(t.list) - t.offset
|
||||
for i := 0; i < maxy; i++ {
|
||||
t.move(i+2, 0, true)
|
||||
if i < count {
|
||||
t.printItem(t.list[t.listIndex(i+t.offset)], i == t.cy-t.offset)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) printItem(item *Item, current bool) {
|
||||
_, selected := t.selected[item.text]
|
||||
if current {
|
||||
C.CPrint(C.COL_CURSOR, true, ">")
|
||||
if selected {
|
||||
C.CPrint(C.COL_CURRENT, true, ">")
|
||||
} else {
|
||||
C.CPrint(C.COL_CURRENT, true, " ")
|
||||
}
|
||||
t.printHighlighted(item, true, C.COL_CURRENT, C.COL_CURRENT_MATCH)
|
||||
} else {
|
||||
C.CPrint(C.COL_CURSOR, true, " ")
|
||||
if selected {
|
||||
C.CPrint(C.COL_SELECTED, true, ">")
|
||||
} else {
|
||||
C.Print(" ")
|
||||
}
|
||||
t.printHighlighted(item, false, 0, C.COL_MATCH)
|
||||
}
|
||||
}
|
||||
|
||||
func trimRight(runes []rune, width int) ([]rune, int) {
|
||||
currentWidth := displayWidth(runes)
|
||||
trimmed := 0
|
||||
|
||||
for currentWidth > width && len(runes) > 0 {
|
||||
sz := len(runes)
|
||||
currentWidth -= runewidth.RuneWidth(runes[sz-1])
|
||||
runes = runes[:sz-1]
|
||||
trimmed += 1
|
||||
}
|
||||
return runes, trimmed
|
||||
}
|
||||
|
||||
func trimLeft(runes []rune, width int) ([]rune, int) {
|
||||
currentWidth := displayWidth(runes)
|
||||
trimmed := 0
|
||||
|
||||
for currentWidth > width && len(runes) > 0 {
|
||||
currentWidth -= runewidth.RuneWidth(runes[0])
|
||||
runes = runes[1:]
|
||||
trimmed += 1
|
||||
}
|
||||
return runes, trimmed
|
||||
}
|
||||
|
||||
func (*Terminal) printHighlighted(item *Item, bold bool, col1 int, col2 int) {
|
||||
maxe := 0
|
||||
for _, offset := range item.offsets {
|
||||
if offset[1] > maxe {
|
||||
maxe = offset[1]
|
||||
}
|
||||
}
|
||||
|
||||
// Overflow
|
||||
text := []rune(*item.text)
|
||||
offsets := item.offsets
|
||||
maxWidth := C.MaxX() - 3
|
||||
fullWidth := displayWidth(text)
|
||||
if fullWidth > maxWidth {
|
||||
// Stri..
|
||||
matchEndWidth := displayWidth(text[:maxe])
|
||||
if matchEndWidth <= maxWidth-2 {
|
||||
text, _ = trimRight(text, maxWidth-2)
|
||||
text = append(text, []rune("..")...)
|
||||
} else {
|
||||
// Stri..
|
||||
if matchEndWidth < fullWidth-2 {
|
||||
text = append(text[:maxe], []rune("..")...)
|
||||
}
|
||||
// ..ri..
|
||||
var diff int
|
||||
text, diff = trimLeft(text, maxWidth-2)
|
||||
|
||||
// Transform offsets
|
||||
offsets = make([]Offset, len(item.offsets))
|
||||
for idx, offset := range item.offsets {
|
||||
b, e := offset[0], offset[1]
|
||||
b += 2 - diff
|
||||
e += 2 - diff
|
||||
b = Max(b, 2)
|
||||
if b < e {
|
||||
offsets[idx] = Offset{b, e}
|
||||
}
|
||||
}
|
||||
text = append([]rune(".."), text...)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(ByOrder(offsets))
|
||||
index := 0
|
||||
for _, offset := range offsets {
|
||||
b := Max(index, offset[0])
|
||||
e := Max(index, offset[1])
|
||||
C.CPrint(col1, bold, string(text[index:b]))
|
||||
C.CPrint(col2, bold, string(text[b:e]))
|
||||
index = e
|
||||
}
|
||||
if index < len(text) {
|
||||
C.CPrint(col1, bold, string(text[index:]))
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) printAll() {
|
||||
t.printList()
|
||||
t.printInfo()
|
||||
t.printPrompt()
|
||||
}
|
||||
|
||||
func (t *Terminal) refresh() {
|
||||
t.placeCursor()
|
||||
C.Refresh()
|
||||
}
|
||||
|
||||
func (t *Terminal) delChar() bool {
|
||||
if len(t.input) > 0 && t.cx < len(t.input) {
|
||||
t.input = append(t.input[:t.cx], t.input[t.cx+1:]...)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func findLastMatch(pattern string, str string) int {
|
||||
rx, err := regexp.Compile(pattern)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
locs := rx.FindAllStringIndex(str, -1)
|
||||
if locs == nil {
|
||||
return -1
|
||||
}
|
||||
return locs[len(locs)-1][0]
|
||||
}
|
||||
|
||||
func findFirstMatch(pattern string, str string) int {
|
||||
rx, err := regexp.Compile(pattern)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
loc := rx.FindStringIndex(str)
|
||||
if loc == nil {
|
||||
return -1
|
||||
}
|
||||
return loc[0]
|
||||
}
|
||||
|
||||
func copySlice(slice []rune) []rune {
|
||||
ret := make([]rune, len(slice))
|
||||
copy(ret, slice)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (t *Terminal) rubout(pattern string) {
|
||||
pcx := t.cx
|
||||
after := t.input[t.cx:]
|
||||
t.cx = findLastMatch(pattern, string(t.input[:t.cx])) + 1
|
||||
t.yanked = copySlice(t.input[t.cx:pcx])
|
||||
t.input = append(t.input[:t.cx], after...)
|
||||
}
|
||||
|
||||
func (t *Terminal) Loop() {
|
||||
{ // Late initialization
|
||||
t.mutex.Lock()
|
||||
t.initFunc()
|
||||
t.printInfo()
|
||||
t.printPrompt()
|
||||
t.refresh()
|
||||
t.mutex.Unlock()
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
t.reqBox.Wait(func(events *Events) {
|
||||
defer events.Clear()
|
||||
t.mutex.Lock()
|
||||
for req := range *events {
|
||||
switch req {
|
||||
case REQ_PROMPT:
|
||||
t.printPrompt()
|
||||
case REQ_INFO:
|
||||
t.printInfo()
|
||||
case REQ_LIST:
|
||||
t.printList()
|
||||
case REQ_REDRAW:
|
||||
C.Clear()
|
||||
t.printAll()
|
||||
case REQ_CLOSE:
|
||||
C.Close()
|
||||
t.output()
|
||||
os.Exit(0)
|
||||
case REQ_QUIT:
|
||||
C.Close()
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
t.mutex.Unlock()
|
||||
})
|
||||
t.refresh()
|
||||
}
|
||||
}()
|
||||
|
||||
looping := true
|
||||
for looping {
|
||||
event := C.GetChar()
|
||||
|
||||
t.mutex.Lock()
|
||||
previousInput := t.input
|
||||
events := []EventType{REQ_PROMPT}
|
||||
toggle := func() {
|
||||
item := t.list[t.listIndex(t.cy)]
|
||||
if _, found := t.selected[item.text]; !found {
|
||||
t.selected[item.text] = item.origText
|
||||
} else {
|
||||
delete(t.selected, item.text)
|
||||
}
|
||||
}
|
||||
req := func(evts ...EventType) {
|
||||
for _, event := range evts {
|
||||
events = append(events, event)
|
||||
if event == REQ_CLOSE || event == REQ_QUIT {
|
||||
looping = false
|
||||
}
|
||||
}
|
||||
}
|
||||
switch event.Type {
|
||||
case C.INVALID:
|
||||
continue
|
||||
case C.CTRL_A:
|
||||
t.cx = 0
|
||||
case C.CTRL_B:
|
||||
if t.cx > 0 {
|
||||
t.cx -= 1
|
||||
}
|
||||
case C.CTRL_C, C.CTRL_G, C.CTRL_Q, C.ESC:
|
||||
req(REQ_QUIT)
|
||||
case C.CTRL_D:
|
||||
if !t.delChar() && t.cx == 0 {
|
||||
req(REQ_QUIT)
|
||||
}
|
||||
case C.CTRL_E:
|
||||
t.cx = len(t.input)
|
||||
case C.CTRL_F:
|
||||
if t.cx < len(t.input) {
|
||||
t.cx += 1
|
||||
}
|
||||
case C.CTRL_H:
|
||||
if t.cx > 0 {
|
||||
t.input = append(t.input[:t.cx-1], t.input[t.cx:]...)
|
||||
t.cx -= 1
|
||||
}
|
||||
case C.TAB:
|
||||
if t.multi && len(t.list) > 0 {
|
||||
toggle()
|
||||
t.vmove(-1)
|
||||
req(REQ_LIST, REQ_INFO)
|
||||
}
|
||||
case C.BTAB:
|
||||
if t.multi && len(t.list) > 0 {
|
||||
toggle()
|
||||
t.vmove(1)
|
||||
req(REQ_LIST, REQ_INFO)
|
||||
}
|
||||
case C.CTRL_J, C.CTRL_N:
|
||||
t.vmove(-1)
|
||||
req(REQ_LIST)
|
||||
case C.CTRL_K, C.CTRL_P:
|
||||
t.vmove(1)
|
||||
req(REQ_LIST)
|
||||
case C.CTRL_M:
|
||||
req(REQ_CLOSE)
|
||||
case C.CTRL_L:
|
||||
req(REQ_REDRAW)
|
||||
case C.CTRL_U:
|
||||
if t.cx > 0 {
|
||||
t.yanked = copySlice(t.input[:t.cx])
|
||||
t.input = t.input[t.cx:]
|
||||
t.cx = 0
|
||||
}
|
||||
case C.CTRL_W:
|
||||
if t.cx > 0 {
|
||||
t.rubout("\\s\\S")
|
||||
}
|
||||
case C.ALT_BS:
|
||||
if t.cx > 0 {
|
||||
t.rubout("[^[:alnum:]][[:alnum:]]")
|
||||
}
|
||||
case C.CTRL_Y:
|
||||
t.input = append(append(t.input[:t.cx], t.yanked...), t.input[t.cx:]...)
|
||||
t.cx += len(t.yanked)
|
||||
case C.DEL:
|
||||
t.delChar()
|
||||
case C.PGUP:
|
||||
t.vmove(maxItems() - 1)
|
||||
req(REQ_LIST)
|
||||
case C.PGDN:
|
||||
t.vmove(-(maxItems() - 1))
|
||||
req(REQ_LIST)
|
||||
case C.ALT_B:
|
||||
t.cx = findLastMatch("[^[:alnum:]][[:alnum:]]", string(t.input[:t.cx])) + 1
|
||||
case C.ALT_F:
|
||||
t.cx += findFirstMatch("[[:alnum:]][^[:alnum:]]|(.$)", string(t.input[t.cx:])) + 1
|
||||
case C.ALT_D:
|
||||
ncx := t.cx +
|
||||
findFirstMatch("[[:alnum:]][^[:alnum:]]|(.$)", string(t.input[t.cx:])) + 1
|
||||
if ncx > t.cx {
|
||||
t.yanked = copySlice(t.input[t.cx:ncx])
|
||||
t.input = append(t.input[:t.cx], t.input[ncx:]...)
|
||||
}
|
||||
case C.RUNE:
|
||||
prefix := copySlice(t.input[:t.cx])
|
||||
t.input = append(append(prefix, event.Char), t.input[t.cx:]...)
|
||||
t.cx += 1
|
||||
case C.MOUSE:
|
||||
me := event.MouseEvent
|
||||
mx, my := Min(len(t.input), Max(0, me.X-len(t.prompt))), me.Y
|
||||
if !t.reverse {
|
||||
my = C.MaxY() - my - 1
|
||||
}
|
||||
if me.S != 0 {
|
||||
// Scroll
|
||||
if me.Mod {
|
||||
toggle()
|
||||
}
|
||||
t.vmove(me.S)
|
||||
req(REQ_LIST)
|
||||
} else if me.Double {
|
||||
// Double-click
|
||||
if my >= 2 {
|
||||
t.cy = my - 2
|
||||
req(REQ_CLOSE)
|
||||
}
|
||||
} else if me.Down {
|
||||
if my == 0 && mx >= 0 {
|
||||
// Prompt
|
||||
t.cx = mx
|
||||
req(REQ_PROMPT)
|
||||
} else if my >= 2 {
|
||||
// List
|
||||
t.cy = my - 2
|
||||
if me.Mod {
|
||||
toggle()
|
||||
}
|
||||
req(REQ_LIST)
|
||||
}
|
||||
}
|
||||
}
|
||||
changed := string(previousInput) != string(t.input)
|
||||
t.mutex.Unlock() // Must be unlocked before touching reqBox
|
||||
|
||||
if changed {
|
||||
t.eventBox.Set(EVT_SEARCH_NEW, nil)
|
||||
}
|
||||
for _, event := range events {
|
||||
t.reqBox.Set(event, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) constrain() {
|
||||
count := len(t.list)
|
||||
height := C.MaxY() - 2
|
||||
diffpos := t.cy - t.offset
|
||||
|
||||
t.cy = Max(0, Min(t.cy, count-1))
|
||||
|
||||
if t.cy > t.offset+(height-1) {
|
||||
// Ceil
|
||||
t.offset = t.cy - (height - 1)
|
||||
} else if t.offset > t.cy {
|
||||
// Floor
|
||||
t.offset = t.cy
|
||||
}
|
||||
|
||||
// Adjustment
|
||||
if count-t.offset < height {
|
||||
t.offset = Max(0, count-height)
|
||||
t.cy = Max(0, Min(t.offset+diffpos, count-1))
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) vmove(o int) {
|
||||
if t.reverse {
|
||||
t.cy -= o
|
||||
} else {
|
||||
t.cy += o
|
||||
}
|
||||
}
|
||||
|
||||
func maxItems() int {
|
||||
return C.MaxY() - 2
|
||||
}
|
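A minimal sketch, not part of this commit, of the intended wiring around Terminal: background goroutines push state in through UpdateCount, UpdateProgress and UpdateList, while Loop owns the keyboard and mouse handling and terminates the process on accept or abort. The sketch function and its parameters are hypothetical.

package fzf

// terminalSketch is an illustrative sketch only.
func terminalSketch(opts *Options, items []*Item) {
	eventBox := NewEventBox()
	terminal := NewTerminal(opts, eventBox)
	go func() {
		terminal.UpdateCount(len(items), true) // final count; stops the spinner
		terminal.UpdateList(items)             // queues REQ_INFO and REQ_LIST redraws
	}()
	terminal.Loop() // blocks; exits the process via REQ_CLOSE or REQ_QUIT
}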
194
src/tokenizer.go
Normal file
@@ -0,0 +1,194 @@
|
||||
package fzf
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const RANGE_ELLIPSIS = 0
|
||||
|
||||
type Range struct {
|
||||
begin int
|
||||
end int
|
||||
}
|
||||
|
||||
type Transformed struct {
|
||||
whole *string
|
||||
parts []Token
|
||||
}
|
||||
|
||||
type Token struct {
|
||||
text *string
|
||||
prefixLength int
|
||||
}
|
||||
|
||||
func ParseRange(str *string) (Range, bool) {
|
||||
if (*str) == ".." {
|
||||
return Range{RANGE_ELLIPSIS, RANGE_ELLIPSIS}, true
|
||||
} else if strings.HasPrefix(*str, "..") {
|
||||
end, err := strconv.Atoi((*str)[2:])
|
||||
if err != nil || end == 0 {
|
||||
return Range{}, false
|
||||
} else {
|
||||
return Range{RANGE_ELLIPSIS, end}, true
|
||||
}
|
||||
} else if strings.HasSuffix(*str, "..") {
|
||||
begin, err := strconv.Atoi((*str)[:len(*str)-2])
|
||||
if err != nil || begin == 0 {
|
||||
return Range{}, false
|
||||
} else {
|
||||
return Range{begin, RANGE_ELLIPSIS}, true
|
||||
}
|
||||
} else if strings.Contains(*str, "..") {
|
||||
ns := strings.Split(*str, "..")
|
||||
if len(ns) != 2 {
|
||||
return Range{}, false
|
||||
}
|
||||
begin, err1 := strconv.Atoi(ns[0])
|
||||
end, err2 := strconv.Atoi(ns[1])
|
||||
if err1 != nil || err2 != nil {
|
||||
return Range{}, false
|
||||
}
|
||||
return Range{begin, end}, true
|
||||
}
|
||||
|
||||
n, err := strconv.Atoi(*str)
|
||||
if err != nil || n == 0 {
|
||||
return Range{}, false
|
||||
}
|
||||
return Range{n, n}, true
|
||||
}
|
||||
|
||||
func withPrefixLengths(tokens []string, begin int) []Token {
|
||||
ret := make([]Token, len(tokens))
|
||||
|
||||
prefixLength := begin
|
||||
for idx, token := range tokens {
|
||||
// Need to define a new local variable instead of the reused token to take
|
||||
// the pointer to it
|
||||
str := token
|
||||
ret[idx] = Token{text: &str, prefixLength: prefixLength}
|
||||
prefixLength += len([]rune(token))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
const (
|
||||
AWK_NIL = iota
|
||||
AWK_BLACK
|
||||
AWK_WHITE
|
||||
)
|
||||
|
||||
func awkTokenizer(input *string) ([]string, int) {
|
||||
// Split on ASCII tab (9) and space (32), AWK-style
|
||||
ret := []string{}
|
||||
str := []rune{}
|
||||
prefixLength := 0
|
||||
state := AWK_NIL
|
||||
for _, r := range []rune(*input) {
|
||||
white := r == 9 || r == 32
|
||||
switch state {
|
||||
case AWK_NIL:
|
||||
if white {
|
||||
prefixLength++
|
||||
} else {
|
||||
state = AWK_BLACK
|
||||
str = append(str, r)
|
||||
}
|
||||
case AWK_BLACK:
|
||||
str = append(str, r)
|
||||
if white {
|
||||
state = AWK_WHITE
|
||||
}
|
||||
case AWK_WHITE:
|
||||
if white {
|
||||
str = append(str, r)
|
||||
} else {
|
||||
ret = append(ret, string(str))
|
||||
state = AWK_BLACK
|
||||
str = []rune{r}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(str) > 0 {
|
||||
ret = append(ret, string(str))
|
||||
}
|
||||
return ret, prefixLength
|
||||
}
|
||||
|
||||
func Tokenize(str *string, delimiter *regexp.Regexp) []Token {
|
||||
prefixLength := 0
|
||||
if delimiter == nil {
|
||||
// AWK-style (\S+\s*)
|
||||
tokens, prefixLength := awkTokenizer(str)
|
||||
return withPrefixLengths(tokens, prefixLength)
|
||||
} else {
|
||||
tokens := delimiter.FindAllString(*str, -1)
|
||||
return withPrefixLengths(tokens, prefixLength)
|
||||
}
|
||||
}
|
||||
|
||||
func joinTokens(tokens []Token) string {
|
||||
ret := ""
|
||||
for _, token := range tokens {
|
||||
ret += *token.text
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func Transform(tokens []Token, withNth []Range) *Transformed {
|
||||
transTokens := make([]Token, len(withNth))
|
||||
numTokens := len(tokens)
|
||||
whole := ""
|
||||
for idx, r := range withNth {
|
||||
part := ""
|
||||
minIdx := 0
|
||||
if r.begin == r.end {
|
||||
idx := r.begin
|
||||
if idx == RANGE_ELLIPSIS {
|
||||
part += joinTokens(tokens)
|
||||
} else {
|
||||
if idx < 0 {
|
||||
idx += numTokens + 1
|
||||
}
|
||||
if idx >= 1 && idx <= numTokens {
|
||||
minIdx = idx - 1
|
||||
part += *tokens[idx-1].text
|
||||
}
|
||||
}
|
||||
} else {
|
||||
var begin, end int
|
||||
if r.begin == RANGE_ELLIPSIS { // ..N
|
||||
begin, end = 1, r.end
|
||||
if end < 0 {
|
||||
end += numTokens + 1
|
||||
}
|
||||
} else if r.end == RANGE_ELLIPSIS { // N..
|
||||
begin, end = r.begin, numTokens
|
||||
if begin < 0 {
|
||||
begin += numTokens + 1
|
||||
}
|
||||
} else {
|
||||
begin, end = r.begin, r.end
|
||||
if begin < 0 {
|
||||
begin += numTokens + 1
|
||||
}
|
||||
if end < 0 {
|
||||
end += numTokens + 1
|
||||
}
|
||||
}
|
||||
minIdx = Max(0, begin-1)
|
||||
for idx := begin; idx <= end; idx++ {
|
||||
if idx >= 1 && idx <= numTokens {
|
||||
part += *tokens[idx-1].text
|
||||
}
|
||||
}
|
||||
}
|
||||
whole += part
|
||||
transTokens[idx] = Token{&part, tokens[minIdx].prefixLength}
|
||||
}
|
||||
return &Transformed{
|
||||
whole: &whole,
|
||||
parts: transTokens}
|
||||
}
|
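A minimal sketch, not part of this commit, of how Tokenize and Transform cooperate for --nth/--with-nth handling. The sketch function and the sample line are hypothetical; splitNth comes from src/options.go.

package fzf

import "fmt"

// transformSketch is an illustrative sketch only.
func transformSketch() {
	line := "  foo: bar: baz"
	tokens := Tokenize(&line, nil)              // AWK-style: "foo: ", "bar: ", "baz"
	trans := Transform(tokens, splitNth("2..")) // keep fields 2 onwards
	fmt.Println(*trans.whole)                   // "bar: baz"
	fmt.Println(trans.parts[0].prefixLength)    // 7, offset of "bar" in the original line
}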
97
src/tokenizer_test.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package fzf
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestParseRange(t *testing.T) {
|
||||
{
|
||||
i := ".."
|
||||
r, _ := ParseRange(&i)
|
||||
if r.begin != RANGE_ELLIPSIS || r.end != RANGE_ELLIPSIS {
|
||||
t.Errorf("%s", r)
|
||||
}
|
||||
}
|
||||
{
|
||||
i := "3.."
|
||||
r, _ := ParseRange(&i)
|
||||
if r.begin != 3 || r.end != RANGE_ELLIPSIS {
|
||||
t.Errorf("%s", r)
|
||||
}
|
||||
}
|
||||
{
|
||||
i := "3..5"
|
||||
r, _ := ParseRange(&i)
|
||||
if r.begin != 3 || r.end != 5 {
|
||||
t.Errorf("%s", r)
|
||||
}
|
||||
}
|
||||
{
|
||||
i := "-3..-5"
|
||||
r, _ := ParseRange(&i)
|
||||
if r.begin != -3 || r.end != -5 {
|
||||
t.Errorf("%s", r)
|
||||
}
|
||||
}
|
||||
{
|
||||
i := "3"
|
||||
r, _ := ParseRange(&i)
|
||||
if r.begin != 3 || r.end != 3 {
|
||||
t.Errorf("%s", r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTokenize(t *testing.T) {
|
||||
// AWK-style
|
||||
input := " abc: def: ghi "
|
||||
tokens := Tokenize(&input, nil)
|
||||
if *tokens[0].text != "abc: " || tokens[0].prefixLength != 2 {
|
||||
t.Errorf("%s", tokens)
|
||||
}
|
||||
|
||||
// With delimiter
|
||||
tokens = Tokenize(&input, delimiterRegexp(":"))
|
||||
if *tokens[0].text != " abc:" || tokens[0].prefixLength != 0 {
|
||||
t.Errorf("%s", tokens)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransform(t *testing.T) {
|
||||
input := " abc: def: ghi: jkl"
|
||||
{
|
||||
tokens := Tokenize(&input, nil)
|
||||
{
|
||||
ranges := splitNth("1,2,3")
|
||||
tx := Transform(tokens, ranges)
|
||||
if *tx.whole != "abc: def: ghi: " {
|
||||
t.Errorf("%s", *tx)
|
||||
}
|
||||
}
|
||||
{
|
||||
ranges := splitNth("1..2,3,2..,1")
|
||||
tx := Transform(tokens, ranges)
|
||||
if *tx.whole != "abc: def: ghi: def: ghi: jklabc: " ||
|
||||
len(tx.parts) != 4 ||
|
||||
*tx.parts[0].text != "abc: def: " || tx.parts[0].prefixLength != 2 ||
|
||||
*tx.parts[1].text != "ghi: " || tx.parts[1].prefixLength != 14 ||
|
||||
*tx.parts[2].text != "def: ghi: jkl" || tx.parts[2].prefixLength != 8 ||
|
||||
*tx.parts[3].text != "abc: " || tx.parts[3].prefixLength != 2 {
|
||||
t.Errorf("%s", *tx)
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
tokens := Tokenize(&input, delimiterRegexp(":"))
|
||||
{
|
||||
ranges := splitNth("1..2,3,2..,1")
|
||||
tx := Transform(tokens, ranges)
|
||||
if *tx.whole != " abc: def: ghi: def: ghi: jkl abc:" ||
|
||||
len(tx.parts) != 4 ||
|
||||
*tx.parts[0].text != " abc: def:" || tx.parts[0].prefixLength != 0 ||
|
||||
*tx.parts[1].text != " ghi:" || tx.parts[1].prefixLength != 12 ||
|
||||
*tx.parts[2].text != " def: ghi: jkl" || tx.parts[2].prefixLength != 6 ||
|
||||
*tx.parts[3].text != " abc:" || tx.parts[3].prefixLength != 0 {
|
||||
t.Errorf("%s", *tx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
21
src/util.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package fzf
|
||||
|
||||
func Max(first int, items ...int) int {
|
||||
max := first
|
||||
for _, item := range items {
|
||||
if item > max {
|
||||
max = item
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
func Min(first int, items ...int) int {
|
||||
min := first
|
||||
for _, item := range items {
|
||||
if item < min {
|
||||
min = item
|
||||
}
|
||||
}
|
||||
return min
|
||||
}
|
18
src/util_test.go
Normal file
@@ -0,0 +1,18 @@
|
||||
package fzf
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestMax(t *testing.T) {
|
||||
if Max(-2, 5, 1, 4, 3) != 5 {
|
||||
t.Error("Invalid result")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMin(t *testing.T) {
|
||||
if Min(2, -3) != -3 {
|
||||
t.Error("Invalid result")
|
||||
}
|
||||
if Min(-2, 5, 1, 4, 3) != -2 {
|
||||
t.Error("Invalid result")
|
||||
}
|
||||
}
|