From 21b94d2de5bdf2d8c4e8107e4c197a2abc1e7275 Mon Sep 17 00:00:00 2001
From: Ryan Boehning <ryanboehning@gmail.com>
Date: Sat, 17 Feb 2018 14:01:06 -0800
Subject: [PATCH] Make fzf pass go vet

Add String() methods to types so they can be printed with %s, and
change some %s format specifiers to %v where the default string
representation is good enough. Starting with Go 1.10, `go test` also
runs `go vet`, so this change makes fzf pass `go test` as well.

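The pattern is roughly the following (an illustrative, self-contained
sketch, not code taken from this patch): a value that implements
fmt.Stringer can safely be formatted with %s, while values without a
String method should use %v, which is what vet's printf check expects.

    package main

    import "fmt"

    // token is a hypothetical stand-in for the structs formatted in
    // fzf's tests; it is not the actual fzf type.
    type token struct {
        text         string
        prefixLength int32
    }

    // String makes token satisfy fmt.Stringer, so %s prints this form.
    func (t token) String() string {
        return fmt.Sprintf("token{text: %q, prefixLength: %d}", t.text, t.prefixLength)
    }

    func main() {
        fmt.Printf("%s\n", token{text: "foo", prefixLength: 2}) // ok: fmt.Stringer
        fmt.Printf("%v\n", []int{1, 2, 3})                      // ok: %v accepts any value
    }
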
Close #1236
Close #1219
---
 src/ansi_test.go      |  2 +-
 src/options_test.go   | 12 ++++++------
 src/pattern.go        |  6 ++++++
 src/pattern_test.go   | 12 ++++++------
 src/tokenizer.go      | 11 +++++++++++
 src/tokenizer_test.go | 10 +++++-----
 src/util/chars.go     |  6 ++++++
 7 files changed, 41 insertions(+), 18 deletions(-)

diff --git a/src/ansi_test.go b/src/ansi_test.go
index a5366770..d94ae931 100644
--- a/src/ansi_test.go
+++ b/src/ansi_test.go
@@ -26,7 +26,7 @@ func TestExtractColor(t *testing.T) {
 		output, ansiOffsets, newState := extractColor(src, state, nil)
 		state = newState
 		if output != "hello world" {
-			t.Errorf("Invalid output: %s %s", output, []rune(output))
+			t.Errorf("Invalid output: %s %v", output, []rune(output))
 		}
 		fmt.Println(src, ansiOffsets, clean)
 		assertion(ansiOffsets, state)
diff --git a/src/options_test.go b/src/options_test.go
index 22f4e4ee..543bf570 100644
--- a/src/options_test.go
+++ b/src/options_test.go
@@ -50,7 +50,7 @@ func TestDelimiterRegexString(t *testing.T) {
 		tokens[2].text.ToString() != "---*" ||
 		tokens[3].text.ToString() != "*" ||
 		tokens[4].text.ToString() != "---" {
-		t.Errorf("%s %s %d", delim, tokens, len(tokens))
+		t.Errorf("%s %v %d", delim, tokens, len(tokens))
 	}
 }
 
@@ -71,7 +71,7 @@ func TestSplitNth(t *testing.T) {
 		if len(ranges) != 1 ||
 			ranges[0].begin != rangeEllipsis ||
 			ranges[0].end != rangeEllipsis {
-			t.Errorf("%s", ranges)
+			t.Errorf("%v", ranges)
 		}
 	}
 	{
@@ -87,7 +87,7 @@ func TestSplitNth(t *testing.T) {
 			ranges[7].begin != -2 || ranges[7].end != -2 ||
 			ranges[8].begin != 2 || ranges[8].end != -2 ||
 			ranges[9].begin != rangeEllipsis || ranges[9].end != rangeEllipsis {
-			t.Errorf("%s", ranges)
+			t.Errorf("%v", ranges)
 		}
 	}
 }
@@ -99,7 +99,7 @@ func TestIrrelevantNth(t *testing.T) {
 		parseOptions(opts, words)
 		postProcessOptions(opts)
 		if len(opts.Nth) != 0 {
-			t.Errorf("nth should be empty: %s", opts.Nth)
+			t.Errorf("nth should be empty: %v", opts.Nth)
 		}
 	}
 	for _, words := range [][]string{[]string{"--nth", "..,3", "+x"}, []string{"--nth", "3,1..", "+x"}, []string{"--nth", "..-1,1", "+x"}} {
@@ -108,7 +108,7 @@ func TestIrrelevantNth(t *testing.T) {
 			parseOptions(opts, words)
 			postProcessOptions(opts)
 			if len(opts.Nth) != 0 {
-				t.Errorf("nth should be empty: %s", opts.Nth)
+				t.Errorf("nth should be empty: %v", opts.Nth)
 			}
 		}
 		{
@@ -117,7 +117,7 @@ func TestIrrelevantNth(t *testing.T) {
 			parseOptions(opts, words)
 			postProcessOptions(opts)
 			if len(opts.Nth) != 2 {
-				t.Errorf("nth should not be empty: %s", opts.Nth)
+				t.Errorf("nth should not be empty: %v", opts.Nth)
 			}
 		}
 	}
diff --git a/src/pattern.go b/src/pattern.go
index 636ae1ee..2627dea6 100644
--- a/src/pattern.go
+++ b/src/pattern.go
@@ -1,6 +1,7 @@
 package fzf
 
 import (
+	"fmt"
 	"regexp"
 	"strings"
 
@@ -34,6 +35,11 @@ type term struct {
 	caseSensitive bool
 }
 
+// String returns the string representation of a term.
+func (t term) String() string {
+	return fmt.Sprintf("term{typ: %d, inv: %v, text: []rune(%q), caseSensitive: %v}", t.typ, t.inv, string(t.text), t.caseSensitive)
+}
+
 type termSet []term
 
 // Pattern represents search pattern
diff --git a/src/pattern_test.go b/src/pattern_test.go
index 1930dddb..bfadf5d8 100644
--- a/src/pattern_test.go
+++ b/src/pattern_test.go
@@ -31,12 +31,12 @@ func TestParseTermsExtended(t *testing.T) {
 		terms[8][1].typ != termExact || terms[8][1].inv ||
 		terms[8][2].typ != termSuffix || terms[8][2].inv ||
 		terms[8][3].typ != termExact || !terms[8][3].inv {
-		t.Errorf("%s", terms)
+		t.Errorf("%v", terms)
 	}
 	for _, termSet := range terms[:8] {
 		term := termSet[0]
 		if len(term.text) != 3 {
-			t.Errorf("%s", term)
+			t.Errorf("%v", term)
 		}
 	}
 }
@@ -53,14 +53,14 @@ func TestParseTermsExtendedExact(t *testing.T) {
 		terms[5][0].typ != termFuzzy || !terms[5][0].inv || len(terms[5][0].text) != 3 ||
 		terms[6][0].typ != termPrefix || !terms[6][0].inv || len(terms[6][0].text) != 3 ||
 		terms[7][0].typ != termSuffix || !terms[7][0].inv || len(terms[7][0].text) != 3 {
-		t.Errorf("%s", terms)
+		t.Errorf("%v", terms)
 	}
 }
 
 func TestParseTermsEmpty(t *testing.T) {
 	terms := parseTerms(true, CaseSmart, false, "' ^ !' !^")
 	if len(terms) != 0 {
-		t.Errorf("%s", terms)
+		t.Errorf("%v", terms)
 	}
 }
 
@@ -73,7 +73,7 @@ func TestExact(t *testing.T) {
 	res, pos := algo.ExactMatchNaive(
 		pattern.caseSensitive, pattern.normalize, pattern.forward, &chars, pattern.termSets[0][0].text, true, nil)
 	if res.Start != 7 || res.End != 10 {
-		t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
+		t.Errorf("%v / %d / %d", pattern.termSets, res.Start, res.End)
 	}
 	if pos != nil {
 		t.Errorf("pos is expected to be nil")
@@ -90,7 +90,7 @@ func TestEqual(t *testing.T) {
 		res, pos := algo.EqualMatch(
 			pattern.caseSensitive, pattern.normalize, pattern.forward, &chars, pattern.termSets[0][0].text, true, nil)
 		if res.Start != sidxExpected || res.End != eidxExpected {
-			t.Errorf("%s / %d / %d", pattern.termSets, res.Start, res.End)
+			t.Errorf("%v / %d / %d", pattern.termSets, res.Start, res.End)
 		}
 		if pos != nil {
 			t.Errorf("pos is expected to be nil")
diff --git a/src/tokenizer.go b/src/tokenizer.go
index 6c1d8cab..208d79e9 100644
--- a/src/tokenizer.go
+++ b/src/tokenizer.go
@@ -2,6 +2,7 @@ package fzf
 
 import (
 	"bytes"
+	"fmt"
 	"regexp"
 	"strconv"
 	"strings"
@@ -23,12 +24,22 @@ type Token struct {
 	prefixLength int32
 }
 
+// String returns the string representation of a Token.
+func (t Token) String() string {
+	return fmt.Sprintf("Token{text: %s, prefixLength: %d}", t.text, t.prefixLength)
+}
+
 // Delimiter for tokenizing the input
 type Delimiter struct {
 	regex *regexp.Regexp
 	str   *string
 }
 
+// String returns the string representation of a Delimiter.
+func (d Delimiter) String() string {
+	return fmt.Sprintf("Delimiter{regex: %v, str: &%q}", d.regex, *d.str)
+}
+
 func newRange(begin int, end int) Range {
 	if begin == 1 {
 		begin = rangeEllipsis
diff --git a/src/tokenizer_test.go b/src/tokenizer_test.go
index 110fd062..0bdd0c19 100644
--- a/src/tokenizer_test.go
+++ b/src/tokenizer_test.go
@@ -9,35 +9,35 @@ func TestParseRange(t *testing.T) {
 		i := ".."
 		r, _ := ParseRange(&i)
 		if r.begin != rangeEllipsis || r.end != rangeEllipsis {
-			t.Errorf("%s", r)
+			t.Errorf("%v", r)
 		}
 	}
 	{
 		i := "3.."
 		r, _ := ParseRange(&i)
 		if r.begin != 3 || r.end != rangeEllipsis {
-			t.Errorf("%s", r)
+			t.Errorf("%v", r)
 		}
 	}
 	{
 		i := "3..5"
 		r, _ := ParseRange(&i)
 		if r.begin != 3 || r.end != 5 {
-			t.Errorf("%s", r)
+			t.Errorf("%v", r)
 		}
 	}
 	{
 		i := "-3..-5"
 		r, _ := ParseRange(&i)
 		if r.begin != -3 || r.end != -5 {
-			t.Errorf("%s", r)
+			t.Errorf("%v", r)
 		}
 	}
 	{
 		i := "3"
 		r, _ := ParseRange(&i)
 		if r.begin != 3 || r.end != 3 {
-			t.Errorf("%s", r)
+			t.Errorf("%v", r)
 		}
 	}
 }
diff --git a/src/util/chars.go b/src/util/chars.go
index 9e58313b..ec6fca0e 100644
--- a/src/util/chars.go
+++ b/src/util/chars.go
@@ -1,6 +1,7 @@
 package util
 
 import (
+	"fmt"
 	"unicode"
 	"unicode/utf8"
 	"unsafe"
@@ -94,6 +95,11 @@ func (chars *Chars) Length() int {
 	return len(chars.slice)
 }
 
+// String returns the string representation of a Chars object.
+func (chars *Chars) String() string {
+	return fmt.Sprintf("Chars{slice: []byte(%q), inBytes: %v, trimLengthKnown: %v, trimLength: %d, Index: %d}", chars.slice, chars.inBytes, chars.trimLengthKnown, chars.trimLength, chars.Index)
+}
+
 // TrimLength returns the length after trimming leading and trailing whitespaces
 func (chars *Chars) TrimLength() uint16 {
 	if chars.trimLengthKnown {
-- 
GitLab