From 478a3125cc39479c062d482b1a66f8ed28b5373a Mon Sep 17 00:00:00 2001
From: Anton Medvedev
Date: Tue, 12 Mar 2024 23:58:00 +0100
Subject: [PATCH] Add complete

---
 complete.go                  |  69 ++++++
 internal/shlex/shlex.go      | 410 +++++++++++++++++++++++++++++++++++
 internal/shlex/shlex_test.go | 113 ++++++++++
 main.go                      |   2 +
 4 files changed, 594 insertions(+)
 create mode 100644 complete.go
 create mode 100644 internal/shlex/shlex.go
 create mode 100644 internal/shlex/shlex_test.go

diff --git a/complete.go b/complete.go
new file mode 100644
index 0000000..f3d9a2c
--- /dev/null
+++ b/complete.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/antonmedv/fx/internal/shlex"
+)
+
+var flags = []string{
+	"--help",
+	"--raw",
+	"--slurp",
+	"--themes",
+	"--version",
+	"--yaml",
+	"-h",
+	"-r",
+	"-s",
+	"-v",
+}
+
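+// complete implements shell completion for fx itself. It is assumed to be
+// registered along the lines of bash's `complete -C fx fx`, which re-invokes
+// fx with COMP_LINE set to the full command line and the word being
+// completed passed as os.Args[2].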
+func complete() {
+	compLine, ok := os.LookupEnv("COMP_LINE")
+
+	if !ok || len(os.Args) < 3 {
+		return
+	}
+
+	// Get the current partial word to be completed.
+	partial := os.Args[2]
+
+	var reply []string
+
+	if strings.HasPrefix(partial, "-") {
+		// Filter the flags that match the partial word.
+		for _, flag := range flags {
+			if strings.HasPrefix(flag, partial) {
+				reply = append(reply, flag)
+			}
+		}
+	}
+
+	args, err := shlex.Split(compLine)
+	if err != nil {
+		return
+	}
+
+	// While the command line holds at most the command name and the partial
+	// word, the user is still typing the first argument, so complete file
+	// names instead of flags (files is expected to be provided elsewhere in
+	// the package).
+	if len(args) <= 2 {
+		reply = files(partial)
+	}
+
+	for _, word := range reply {
+		fmt.Println(word)
+	}
+
+	os.Exit(0)
+}
+
+// log appends the given arguments to the log file.
+func log(args ...interface{}) {
+	file, err := os.OpenFile("complete.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+	if err != nil {
+		return
+	}
+	fmt.Fprintln(file, args...)
+	file.Close()
+}
diff --git a/internal/shlex/shlex.go b/internal/shlex/shlex.go
new file mode 100644
index 0000000..e0971a5
--- /dev/null
+++ b/internal/shlex/shlex.go
@@ -0,0 +1,410 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+	shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+	l := NewLexer(os.Stdin)
+	for {
+		token, err := l.Next()
+		if err != nil {
+			break // io.EOF once the stream is exhausted
+		}
+		// process token
+	}
+
+To access the raw token stream (which includes tokens for comments):
+
+	t := NewTokenizer(os.Stdin)
+	for {
+		token, err := t.Next()
+		if err != nil {
+			break // io.EOF once the stream is exhausted
+		}
+		// process token
+	}
+*/
+package shlex
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// TokenType is a top-level token classification: a word, space, comment, or unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: a quote, space, or escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+	tokenType TokenType
+	value     string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+	if a == nil || b == nil {
+		return false
+	}
+	if a.tokenType != b.tokenType {
+		return false
+	}
+	return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+	spaceRunes            = " \t\r\n"
+	escapingQuoteRunes    = `"`
+	nonEscapingQuoteRunes = "'"
+	escapeRunes           = `\`
+	commentRunes          = "#"
+)
+
+// Classes of rune token
+const (
+	unknownRuneClass runeTokenClass = iota
+	spaceRuneClass
+	escapingQuoteRuneClass
+	nonEscapingQuoteRuneClass
+	escapeRuneClass
+	commentRuneClass
+	eofRuneClass
+)
+
+// Classes of lexical token
+const (
+	UnknownToken TokenType = iota
+	WordToken
+	SpaceToken
+	CommentToken
+)
+
+// Lexer state machine states
+const (
+	startState           lexerState = iota // no runes have been seen
+	inWordState                            // processing regular runes in a word
+	escapingState                          // we have just consumed an escape rune; the next rune is literal
+	escapingQuotedState                    // we have just consumed an escape rune within a quoted string
+	quotingEscapingState                   // we are within a quoted string that supports escaping ("...")
+	quotingState                           // we are within a string that does not support escaping ('...')
+	commentState                           // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+	for _, runeChar := range runes {
+		typeMap[runeChar] = tokenType
+	}
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+	t := tokenClassifier{}
+	t.addRuneClass(spaceRunes, spaceRuneClass)
+	t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+	t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+	t.addRuneClass(escapeRunes, escapeRuneClass)
+	t.addRuneClass(commentRunes, commentRuneClass)
+	return t
+}
+
+// ClassifyRune classifies a rune.
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+	return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+	return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+	for {
+		token, err := (*Tokenizer)(l).Next()
+		if err != nil {
+			return "", err
+		}
+		switch token.tokenType {
+		case WordToken:
+			return token.value, nil
+		case CommentToken:
+			// skip comments
+		default:
+			return "", fmt.Errorf("unknown token type: %v", token.tokenType)
+		}
+	}
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens.
+type Tokenizer struct {
+	input      bufio.Reader
+	classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+	input := bufio.NewReader(r)
+	classifier := newDefaultClassifier()
+	return &Tokenizer{
+		input:      *input,
+		classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state
+// machine. It returns io.EOF once the input is exhausted, and an error if
+// the lexer reaches a state it does not know how to handle.
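+//
+// For example, scanning the input `"five \"six\""` moves from startState to
+// quotingEscapingState at the opening double quote, to escapingQuotedState at
+// each backslash (so the escaped quotes are kept literally), back to
+// quotingEscapingState after each escaped rune, and to inWordState at the
+// closing quote, yielding the single WordToken `five "six"`.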
+func (t *Tokenizer) scanStream() (*Token, error) {
+	state := startState
+	var tokenType TokenType
+	var value []rune
+	var nextRune rune
+	var nextRuneType runeTokenClass
+	var err error
+
+	for {
+		nextRune, _, err = t.input.ReadRune()
+		nextRuneType = t.classifier.ClassifyRune(nextRune)
+
+		if err == io.EOF {
+			nextRuneType = eofRuneClass
+			err = nil
+		} else if err != nil {
+			return nil, err
+		}
+
+		switch state {
+		case startState: // no runes read yet
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						return nil, io.EOF
+					}
+				case spaceRuneClass:
+					{
+					}
+				case escapingQuoteRuneClass:
+					{
+						tokenType = WordToken
+						state = quotingEscapingState
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						tokenType = WordToken
+						state = quotingState
+					}
+				case escapeRuneClass:
+					{
+						tokenType = WordToken
+						state = escapingState
+					}
+				case commentRuneClass:
+					{
+						tokenType = CommentToken
+						state = commentState
+					}
+				default:
+					{
+						tokenType = WordToken
+						value = append(value, nextRune)
+						state = inWordState
+					}
+				}
+			}
+		case inWordState: // in a regular word
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case spaceRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case escapingQuoteRuneClass:
+					{
+						state = quotingEscapingState
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						state = quotingState
+					}
+				case escapeRuneClass:
+					{
+						state = escapingState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case escapingState: // the rune after an escape character
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, nil
+					}
+				default:
+					{
+						state = inWordState
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case escapingQuotedState: // the next rune after an escape character, in double quotes
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, nil
+					}
+				default:
+					{
+						state = quotingEscapingState
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case quotingEscapingState: // in escaping double quotes
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, nil
+					}
+				case escapingQuoteRuneClass:
+					{
+						state = inWordState
+					}
+				case escapeRuneClass:
+					{
+						state = escapingQuotedState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case quotingState: // in non-escaping single quotes
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, nil
+					}
+				case nonEscapingQuoteRuneClass:
+					{
+						state = inWordState
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		case commentState: // in a comment
+			{
+				switch nextRuneType {
+				case eofRuneClass:
+					{
+						token := &Token{
+							tokenType: tokenType,
+							value:     string(value)}
+						return token, err
+					}
+				case spaceRuneClass:
+					{
+						if nextRune == '\n' {
+							state = startState
+							token := &Token{
+								tokenType: tokenType,
+								value:     string(value)}
+							return token, err
+						} else {
+							value = append(value, nextRune)
+						}
+					}
+				default:
+					{
+						value = append(value, nextRune)
+					}
+				}
+			}
+		default:
+			{
+				return nil, fmt.Errorf("unexpected state: %v", state)
+			}
+		}
+	}
+}
+
+// Next returns the next token 
in the stream. +func (t *Tokenizer) Next() (*Token, error) { + return t.scanStream() +} + +// Split partitions a string into a slice of strings. +func Split(s string) ([]string, error) { + l := NewLexer(strings.NewReader(s)) + subStrings := make([]string, 0) + for { + word, err := l.Next() + if err != nil { + if err == io.EOF { + return subStrings, nil + } + return subStrings, err + } + subStrings = append(subStrings, word) + } +} diff --git a/internal/shlex/shlex_test.go b/internal/shlex/shlex_test.go new file mode 100644 index 0000000..2bb4573 --- /dev/null +++ b/internal/shlex/shlex_test.go @@ -0,0 +1,113 @@ +/* +Copyright 2012 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shlex + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +var ( + // one two "three four" "five \"six\"" seven#eight # nine # ten + // eleven 'twelve\' + testString = "one two \"three four\" \"five \\\"six\\\"\" seven#eight # nine # ten\n eleven 'twelve\\' thirteen=13 fourteen/14" +) + +func TestClassifier(t *testing.T) { + classifier := newDefaultClassifier() + tests := map[rune]runeTokenClass{ + ' ': spaceRuneClass, + '"': escapingQuoteRuneClass, + '\'': nonEscapingQuoteRuneClass, + '#': commentRuneClass} + for runeChar, want := range tests { + got := classifier.ClassifyRune(runeChar) + if got != want { + t.Errorf("ClassifyRune(%v) -> %v. Want: %v", runeChar, got, want) + } + } +} + +func TestTokenizer(t *testing.T) { + testInput := strings.NewReader(testString) + expectedTokens := []*Token{ + &Token{WordToken, "one"}, + &Token{WordToken, "two"}, + &Token{WordToken, "three four"}, + &Token{WordToken, "five \"six\""}, + &Token{WordToken, "seven#eight"}, + &Token{CommentToken, " nine # ten"}, + &Token{WordToken, "eleven"}, + &Token{WordToken, "twelve\\"}, + &Token{WordToken, "thirteen=13"}, + &Token{WordToken, "fourteen/14"}} + + tokenizer := NewTokenizer(testInput) + for i, want := range expectedTokens { + got, err := tokenizer.Next() + if err != nil { + t.Error(err) + } + if !got.Equal(want) { + t.Errorf("Tokenizer.Next()[%v] of %q -> %v. Want: %v", i, testString, got, want) + } + } +} + +func TestLexer(t *testing.T) { + testInput := strings.NewReader(testString) + expectedStrings := []string{"one", "two", "three four", "five \"six\"", "seven#eight", "eleven", "twelve\\", "thirteen=13", "fourteen/14"} + + lexer := NewLexer(testInput) + for i, want := range expectedStrings { + got, err := lexer.Next() + if err != nil { + t.Error(err) + } + if got != want { + t.Errorf("Lexer.Next()[%v] of %q -> %v. Want: %v", i, testString, got, want) + } + } +} + +func TestSplit(t *testing.T) { + want := []string{"one", "two", "three four", "five \"six\"", "seven#eight", "eleven", "twelve\\", "thirteen=13", "fourteen/14"} + got, err := Split(testString) + if err != nil { + t.Error(err) + } + if len(want) != len(got) { + t.Errorf("Split(%q) -> %v. Want: %v", testString, got, want) + } + for i := range got { + if got[i] != want[i] { + t.Errorf("Split(%q)[%v] -> %v. 
Want: %v", testString, i, got[i], want[i]) + } + } +} + +func TestSplit_unfinished(t *testing.T) { + got, err := Split("one 'two") + require.NoError(t, err) + require.Equal(t, []string{"one", "two"}, got) + + got, err = Split("one \"two") + require.NoError(t, err) + require.Equal(t, []string{"one", "two"}, got) +} diff --git a/main.go b/main.go index 3a3d715..f7b79e4 100644 --- a/main.go +++ b/main.go @@ -51,6 +51,8 @@ func main() { defer pprof.WriteHeapProfile(memProf) } + complete() + var args []string for _, arg := range os.Args[1:] { switch arg {