This article collects typical usage examples of the Golang function RunesToChars from github.com/junegunn/fzf/src/util. If you are trying to work out what RunesToChars does and how to call it, the hand-picked code samples on this page should help.
Fourteen code examples of the RunesToChars function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Golang code samples.
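Before the examples, here is a minimal sketch of the call pattern they all share. It is an illustrative snippet rather than part of the fzf code base: it assumes the fzf repository is available under the import path github.com/junegunn/fzf/src/util, and it only uses Chars methods that appear in the examples below (ToString, ToRunes).

package main

import (
	"fmt"

	"github.com/junegunn/fzf/src/util"
)

func main() {
	// RunesToChars wraps a []rune in the util.Chars type that fzf's
	// tokenizer, matcher, and terminal code operate on.
	chars := util.RunesToChars([]rune("junegunn"))

	fmt.Println(chars.ToString())     // prints "junegunn"
	fmt.Println(len(chars.ToRunes())) // prints 8
}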
Example 1: TestOrigTextAndTransformed
func TestOrigTextAndTransformed(t *testing.T) {
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("jg"))
tokens := Tokenize(util.RunesToChars([]rune("junegunn")), Delimiter{})
trans := Transform(tokens, []Range{Range{1, 1}})
origBytes := []byte("junegunn.choi")
for _, extended := range []bool{false, true} {
chunk := Chunk{
&Item{
text: util.RunesToChars([]rune("junegunn")),
origText: &origBytes,
transformed: trans},
}
pattern.extended = extended
matches := pattern.matchChunk(&chunk, nil, slab) // No cache
if !(matches[0].item.text.ToString() == "junegunn" &&
string(*matches[0].item.origText) == "junegunn.choi" &&
reflect.DeepEqual(matches[0].item.transformed, trans)) {
t.Error("Invalid match result", matches)
}
match, offsets, pos := pattern.MatchItem(chunk[0], true, slab)
if !(match.item.text.ToString() == "junegunn" &&
string(*match.item.origText) == "junegunn.choi" &&
offsets[0][0] == 0 && offsets[0][1] == 5 &&
reflect.DeepEqual(match.item.transformed, trans)) {
t.Error("Invalid match result", match, offsets, extended)
}
if !((*pos)[0] == 4 && (*pos)[1] == 0) {
t.Error("Invalid pos array", *pos)
}
}
}
Developer: davidcgl, Project: fzf, Lines: 33, Source: pattern_test.go
Example 2: printHeader
func (t *Terminal) printHeader() {
if len(t.header) == 0 {
return
}
max := t.window.Height
var state *ansiState
for idx, lineStr := range t.header {
line := idx + 2
if t.inlineInfo {
line--
}
if line >= max {
continue
}
trimmed, colors, newState := extractColor(lineStr, state, nil)
state = newState
item := &Item{
text: util.RunesToChars([]rune(trimmed)),
colors: colors}
t.move(line, 2, true)
t.printHighlighted(&Result{item: item},
tui.AttrRegular, tui.ColHeader, tui.ColDefault, false)
}
}
Developer: plotnikovanton, Project: dotfiles, Lines: 25, Source: terminal.go
Example 3: Tokenize
// Tokenize tokenizes the given string with the delimiter
func Tokenize(text util.Chars, delimiter Delimiter) []Token {
if delimiter.str == nil && delimiter.regex == nil {
// AWK-style (\S+\s*)
tokens, prefixLength := awkTokenizer(text)
return withPrefixLengths(tokens, prefixLength)
}
if delimiter.str != nil {
return withPrefixLengths(text.Split(*delimiter.str), 0)
}
// FIXME performance
var tokens []string
if delimiter.regex != nil {
str := text.ToString()
for len(str) > 0 {
loc := delimiter.regex.FindStringIndex(str)
if loc == nil {
loc = []int{0, len(str)}
}
last := util.Max(loc[1], 1)
tokens = append(tokens, str[:last])
str = str[last:]
}
}
asRunes := make([]util.Chars, len(tokens))
for i, token := range tokens {
asRunes[i] = util.RunesToChars([]rune(token))
}
return withPrefixLengths(asRunes, 0)
}
Developer: plotnikovanton, Project: dotfiles, Lines: 32, Source: tokenizer.go
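Example 3 shows that Tokenize has three modes: AWK-style whitespace tokens (Delimiter{}), a fixed-string delimiter (delimiter.str), and a regular-expression delimiter (delimiter.regex). Examples 6 and 9 below exercise the AWK-style and regex cases; the fixed-string branch is not covered by any test in this article, so here is a hedged sketch of it. The test name is hypothetical, the snippet assumes it lives in a _test.go file inside the fzf package (Delimiter's fields are unexported), and the expected token values assume that Chars.Split keeps the delimiter attached to each token, as the TrimSuffix call in Example 7 suggests.

// Hypothetical in-package test exercising the fixed-string branch of Tokenize,
// i.e. the text.Split(*delimiter.str) path shown in Example 3.
func TestTokenizeFixedStringDelimiter(t *testing.T) {
	colon := ": "
	tokens := Tokenize(util.RunesToChars([]rune("abc: def: ghi")), Delimiter{str: &colon})
	if tokens[0].text.ToString() != "abc: " ||
		tokens[1].text.ToString() != "def: " ||
		tokens[2].text.ToString() != "ghi" {
		t.Errorf("%s", tokens)
	}
}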
Example 4: TestDelimiterRegexRegex
func TestDelimiterRegexRegex(t *testing.T) {
delim := delimiterRegexp("--\\*")
tokens := Tokenize(util.RunesToChars([]rune("-*--*---**---")), delim)
if delim.str != nil ||
tokens[0].text.ToString() != "-*--*" ||
tokens[1].text.ToString() != "---*" ||
tokens[2].text.ToString() != "*---" {
t.Errorf("%s %d", tokens, len(tokens))
}
}
Developer: robertmeta, Project: vimfiles, Lines: 10, Source: options_test.go
Example 5: TestResultRank
// Match length, string length, index
func TestResultRank(t *testing.T) {
// FIXME global
sortCriteria = []criterion{byScore, byLength}
strs := [][]rune{[]rune("foo"), []rune("foobar"), []rune("bar"), []rune("baz")}
item1 := buildResult(&Item{text: util.RunesToChars(strs[0]), index: 1}, []Offset{}, 2, 3)
if item1.rank.points[0] != math.MaxUint16-2 || // Bonus
item1.rank.points[1] != 3 || // Length
item1.rank.points[2] != 0 || // Unused
item1.rank.points[3] != 0 || // Unused
item1.item.index != 1 {
t.Error(item1.rank)
}
// Only differ in index
item2 := buildResult(&Item{text: util.RunesToChars(strs[0])}, []Offset{}, 2, 3)
items := []*Result{item1, item2}
sort.Sort(ByRelevance(items))
if items[0] != item2 || items[1] != item1 {
t.Error(items)
}
items = []*Result{item2, item1, item1, item2}
sort.Sort(ByRelevance(items))
if items[0] != item2 || items[1] != item2 ||
items[2] != item1 || items[3] != item1 {
t.Error(items, item1, item1.item.index, item2, item2.item.index)
}
// Sort by relevance
item3 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 3, 0)
item4 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 4, 0)
item5 := buildResult(&Item{index: 2}, []Offset{Offset{1, 3}, Offset{5, 7}}, 5, 0)
item6 := buildResult(&Item{index: 2}, []Offset{Offset{1, 2}, Offset{6, 7}}, 6, 0)
items = []*Result{item1, item2, item3, item4, item5, item6}
sort.Sort(ByRelevance(items))
if !(items[0] == item6 && items[1] == item5 &&
items[2] == item4 && items[3] == item3 &&
items[4] == item2 && items[5] == item1) {
t.Error(items, item1, item2, item3, item4, item5, item6)
}
}
Developer: davidcgl, Project: fzf, Lines: 43, Source: result_test.go
Example 6: TestTransform
func TestTransform(t *testing.T) {
input := " abc: def: ghi: jkl"
{
tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
{
ranges := splitNth("1,2,3")
tx := Transform(tokens, ranges)
if string(joinTokens(tx)) != "abc: def: ghi: " {
t.Errorf("%s", tx)
}
}
{
ranges := splitNth("1..2,3,2..,1")
tx := Transform(tokens, ranges)
if string(joinTokens(tx)) != "abc: def: ghi: def: ghi: jklabc: " ||
len(tx) != 4 ||
tx[0].text.ToString() != "abc: def: " || tx[0].prefixLength != 2 ||
tx[1].text.ToString() != "ghi: " || tx[1].prefixLength != 14 ||
tx[2].text.ToString() != "def: ghi: jkl" || tx[2].prefixLength != 8 ||
tx[3].text.ToString() != "abc: " || tx[3].prefixLength != 2 {
t.Errorf("%s", tx)
}
}
}
{
tokens := Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
{
ranges := splitNth("1..2,3,2..,1")
tx := Transform(tokens, ranges)
if string(joinTokens(tx)) != " abc: def: ghi: def: ghi: jkl abc:" ||
len(tx) != 4 ||
tx[0].text.ToString() != " abc: def:" || tx[0].prefixLength != 0 ||
tx[1].text.ToString() != " ghi:" || tx[1].prefixLength != 12 ||
tx[2].text.ToString() != " def: ghi: jkl" || tx[2].prefixLength != 6 ||
tx[3].text.ToString() != " abc:" || tx[3].prefixLength != 0 {
t.Errorf("%s", tx)
}
}
}
}
Developer: plotnikovanton, Project: dotfiles, Lines: 40, Source: tokenizer_test.go
Example 7: replacePlaceholder
func replacePlaceholder(template string, stripAnsi bool, delimiter Delimiter, query string, items []*Item) string {
return placeholder.ReplaceAllStringFunc(template, func(match string) string {
// Escaped pattern
if match[0] == '\\' {
return match[1:]
}
// Current query
if match == "{q}" {
return quoteEntry(query)
}
replacements := make([]string, len(items))
if match == "{}" {
for idx, item := range items {
replacements[idx] = quoteEntry(item.AsString(stripAnsi))
}
return strings.Join(replacements, " ")
}
tokens := strings.Split(match[1:len(match)-1], ",")
ranges := make([]Range, len(tokens))
for idx, s := range tokens {
r, ok := ParseRange(&s)
if !ok {
// Invalid expression, just return the original string in the template
return match
}
ranges[idx] = r
}
for idx, item := range items {
chars := util.RunesToChars([]rune(item.AsString(stripAnsi)))
tokens := Tokenize(chars, delimiter)
trans := Transform(tokens, ranges)
str := string(joinTokens(trans))
if delimiter.str != nil {
str = strings.TrimSuffix(str, *delimiter.str)
} else if delimiter.regex != nil {
delims := delimiter.regex.FindAllStringIndex(str, -1)
if len(delims) > 0 && delims[len(delims)-1][1] == len(str) {
str = str[:delims[len(delims)-1][0]]
}
}
str = strings.TrimSpace(str)
replacements[idx] = quoteEntry(str)
}
return strings.Join(replacements, " ")
})
}
Developer: plotnikovanton, Project: dotfiles, Lines: 51, Source: terminal.go
Example 8: TestExact
func TestExact(t *testing.T) {
defer clearPatternCache()
clearPatternCache()
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true,
[]Range{}, Delimiter{}, []rune("'abc"))
res, pos := algo.ExactMatchNaive(
pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune("aabbcc abc")), pattern.termSets[0][0].text, true, nil)
if res.Start != 7 || res.End != 10 {
t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
}
if pos != nil {
t.Errorf("pos is expected to be nil")
}
}
Developer: davidcgl, Project: fzf, Lines: 14, Source: pattern_test.go
Example 9: TestTokenize
func TestTokenize(t *testing.T) {
// AWK-style
input := " abc: def: ghi "
tokens := Tokenize(util.RunesToChars([]rune(input)), Delimiter{})
if tokens[0].text.ToString() != "abc: " || tokens[0].prefixLength != 2 || tokens[0].trimLength != 4 {
t.Errorf("%s", tokens)
}
// With delimiter
tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp(":"))
if tokens[0].text.ToString() != " abc:" || tokens[0].prefixLength != 0 || tokens[0].trimLength != 4 {
t.Errorf("%s", tokens)
}
// With delimiter regex
tokens = Tokenize(util.RunesToChars([]rune(input)), delimiterRegexp("\\s+"))
if tokens[0].text.ToString() != " " || tokens[0].prefixLength != 0 || tokens[0].trimLength != 0 ||
tokens[1].text.ToString() != "abc: " || tokens[1].prefixLength != 2 || tokens[1].trimLength != 4 ||
tokens[2].text.ToString() != "def: " || tokens[2].prefixLength != 8 || tokens[2].trimLength != 4 ||
tokens[3].text.ToString() != "ghi " || tokens[3].prefixLength != 14 || tokens[3].trimLength != 3 {
t.Errorf("%s", tokens)
}
}
Developer: plotnikovanton, Project: dotfiles, Lines: 23, Source: tokenizer_test.go
Example 10: TestEqual
func TestEqual(t *testing.T) {
defer clearPatternCache()
clearPatternCache()
pattern := BuildPattern(true, algo.FuzzyMatchV2, true, CaseSmart, true, true, []Range{}, Delimiter{}, []rune("^AbC$"))
match := func(str string, sidxExpected int, eidxExpected int) {
res, pos := algo.EqualMatch(
pattern.caseSensitive, pattern.forward, util.RunesToChars([]rune(str)), pattern.termSets[0][0].text, true, nil)
if res.Start != sidxExpected || res.End != eidxExpected {
t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
}
if pos != nil {
t.Errorf("pos is expected to be nil")
}
}
match("ABC", -1, -1)
match("AbC", 0, 3)
}
Developer: davidcgl, Project: fzf, Lines: 18, Source: pattern_test.go
Example 11: assertMatch
func assertMatch(t *testing.T, fun Algo, caseSensitive, forward bool, input, pattern string, sidx int, eidx int, score int) {
if !caseSensitive {
pattern = strings.ToLower(pattern)
}
res, pos := fun(caseSensitive, forward, util.RunesToChars([]rune(input)), []rune(pattern), true, nil)
var start, end int
if pos == nil || len(*pos) == 0 {
start = res.Start
end = res.End
} else {
sort.Ints(*pos)
start = (*pos)[0]
end = (*pos)[len(*pos)-1] + 1
}
if start != sidx {
t.Errorf("Invalid start index: %d (expected: %d, %s / %s)", start, sidx, input, pattern)
}
if end != eidx {
t.Errorf("Invalid end index: %d (expected: %d, %s / %s)", end, eidx, input, pattern)
}
if res.Score != score {
t.Errorf("Invalid score: %d (expected: %d, %s / %s)", res.Score, score, input, pattern)
}
}
Developer: plotnikovanton, Project: dotfiles, Lines: 24, Source: algo_test.go
Example 12: Run
// Run starts fzf
func Run(opts *Options) {
sort := opts.Sort > 0
sortCriteria = opts.Criteria
if opts.Version {
fmt.Println(version)
os.Exit(exitOk)
}
// Event channel
eventBox := util.NewEventBox()
// ANSI code processor
ansiProcessor := func(data []byte) (util.Chars, *[]ansiOffset) {
return util.ToChars(data), nil
}
ansiProcessorRunes := func(data []rune) (util.Chars, *[]ansiOffset) {
return util.RunesToChars(data), nil
}
if opts.Ansi {
if opts.Theme != nil {
var state *ansiState
ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
trimmed, offsets, newState := extractColor(string(data), state, nil)
state = newState
return util.RunesToChars([]rune(trimmed)), offsets
}
} else {
// When color is disabled but ansi option is given,
// we simply strip out ANSI codes from the input
ansiProcessor = func(data []byte) (util.Chars, *[]ansiOffset) {
trimmed, _, _ := extractColor(string(data), nil, nil)
return util.RunesToChars([]rune(trimmed)), nil
}
}
ansiProcessorRunes = func(data []rune) (util.Chars, *[]ansiOffset) {
return ansiProcessor([]byte(string(data)))
}
}
// Chunk list
var chunkList *ChunkList
header := make([]string, 0, opts.HeaderLines)
if len(opts.WithNth) == 0 {
chunkList = NewChunkList(func(data []byte, index int) *Item {
if len(header) < opts.HeaderLines {
header = append(header, string(data))
eventBox.Set(EvtHeader, header)
return nil
}
chars, colors := ansiProcessor(data)
return &Item{
index: int32(index),
text: chars,
colors: colors}
})
} else {
chunkList = NewChunkList(func(data []byte, index int) *Item {
tokens := Tokenize(util.ToChars(data), opts.Delimiter)
trans := Transform(tokens, opts.WithNth)
if len(header) < opts.HeaderLines {
header = append(header, string(joinTokens(trans)))
eventBox.Set(EvtHeader, header)
return nil
}
textRunes := joinTokens(trans)
item := Item{
index: int32(index),
origText: &data,
colors: nil}
trimmed, colors := ansiProcessorRunes(textRunes)
item.text = trimmed
item.colors = colors
return &item
})
}
// Reader
streamingFilter := opts.Filter != nil && !sort && !opts.Tac && !opts.Sync
if !streamingFilter {
reader := Reader{func(data []byte) bool {
return chunkList.Push(data)
}, eventBox, opts.ReadZero}
go reader.ReadSource()
}
// Matcher
forward := true
for _, cri := range opts.Criteria[1:] {
if cri == byEnd {
forward = false
break
}
if cri == byBegin {
break
}
}
patternBuilder := func(runes []rune) *Pattern {
//......... remainder of this function omitted .........
Developer: plotnikovanton, Project: dotfiles, Lines: 101, Source: core.go
Example 13: Transform
// Transform is used to transform the input when --with-nth option is given
func Transform(tokens []Token, withNth []Range) []Token {
transTokens := make([]Token, len(withNth))
numTokens := len(tokens)
for idx, r := range withNth {
parts := []*util.Chars{}
minIdx := 0
if r.begin == r.end {
idx := r.begin
if idx == rangeEllipsis {
chars := util.RunesToChars(joinTokens(tokens))
parts = append(parts, &chars)
} else {
if idx < 0 {
idx += numTokens + 1
}
if idx >= 1 && idx <= numTokens {
minIdx = idx - 1
parts = append(parts, tokens[idx-1].text)
}
}
} else {
var begin, end int
if r.begin == rangeEllipsis { // ..N
begin, end = 1, r.end
if end < 0 {
end += numTokens + 1
}
} else if r.end == rangeEllipsis { // N..
begin, end = r.begin, numTokens
if begin < 0 {
begin += numTokens + 1
}
} else {
begin, end = r.begin, r.end
if begin < 0 {
begin += numTokens + 1
}
if end < 0 {
end += numTokens + 1
}
}
minIdx = util.Max(0, begin-1)
for idx := begin; idx <= end; idx++ {
if idx >= 1 && idx <= numTokens {
parts = append(parts, tokens[idx-1].text)
}
}
}
// Merge multiple parts
var merged util.Chars
switch len(parts) {
case 0:
merged = util.RunesToChars([]rune{})
case 1:
merged = *parts[0]
default:
runes := []rune{}
for _, part := range parts {
runes = append(runes, part.ToRunes()...)
}
merged = util.RunesToChars(runes)
}
var prefixLength int32
if minIdx < numTokens {
prefixLength = tokens[minIdx].prefixLength
} else {
prefixLength = 0
}
transTokens[idx] = Token{&merged, prefixLength, int32(merged.TrimLength())}
}
return transTokens
}
Developer: plotnikovanton, Project: dotfiles, Lines: 74, Source: tokenizer.go
Example 14: newItem
func newItem(str string) *Item {
bytes := []byte(str)
trimmed, _, _ := extractColor(str, nil, nil)
return &Item{origText: &bytes, text: util.RunesToChars([]rune(trimmed))}
}
Developer: plotnikovanton, Project: dotfiles, Lines: 5, Source: terminal_test.go
Note: The examples of the github.com/junegunn/fzf/src/util.RunesToChars function in this article were compiled from GitHub/MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to its original authors. Please consult the License of the corresponding project before distributing or using the code, and do not reproduce it without permission.