ref: 94482cb2c97aa215f83940643c5d4c0933727dcb
parent: c6d2e563e647956590267c6b5a51e4c14c88a68c
author: Runxi Yu <me@runxiyu.org>
date: Sat Feb 21 08:38:02 EST 2026
*: Modernize and lint; add CI
--- /dev/null
+++ b/.builds/alpine.yml
@@ -1,0 +1,27 @@
+image: alpine/edge
+packages:
+ - go
+ - git
+ - golangci-lint
+tasks:
+ - build: |
+ cd furgit
+ go build ./...
+ - test: |
+ cd furgit
+ go test -v ./...
+ - test-race: |
+ cd furgit
+ go test -race -v ./...
+ - test-purego: |
+ cd furgit
+ go test -v -tags purego ./...
+ - lint: |
+ cd furgit
+ golangci-lint run ./...
+ - vet: |
+ cd furgit
+ go vet ./...
+ - fmt-check: |
+ cd furgit
+ test -z "$(gofmt -l .)"
--- /dev/null
+++ b/.golangci.yaml
@@ -1,0 +1,44 @@
+version: "2"
+
+linters:
+ default: all
+ disable:
+ - depguard # not sensible for us
+ - dupword # extremely normal in tests and a pretty unnecessary linter
+ - goconst # unnecessary especially for our parsing code; many false positives
+ - mnd # same as above
+ - lll # poor standard
+ - ireturn # not an issue
+ - perfsprint # silly fmt.Errorf vs errors.New suggestion
+ - noinlineerr # not an issue
+ - gosmopolitan # completely normal to have CJK and such in tests
+ - gochecknoglobals # unlikely to be introduced accidentally and are usually intentional
+ - nonamedreturns # named returns are often good for clarity
+ - exhaustruct # tmp: should fix... but too annoying at the moment
+ - wsl_v5 # tmp
+ - wsl # tmp
+ - err113 # tmp: will enable when we properly use defined errors
+ - gochecknoinits # tmp
+ - nlreturn # tmp
+ - cyclop # tmp
+ - gocognit # tmp
+ - varnamelen # tmp
+ - funlen # tmp
+ - godox # tmp
+ - nestif # tmp
+ - maintidx # tmp
+ - gocyclo # tmp
+ - wrapcheck # unsure
+ settings:
+ gosec:
+ excludes:
+ - G301 # UNIX permissions
+ - G306
+ revive:
+ rules:
+ - name: error-strings
+ disabled: true
+
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
--- a/config/config.go
+++ b/config/config.go
@@ -101,7 +101,7 @@
for {ch, err := p.nextChar()
- if err == io.EOF {+ if errors.Is(err, io.EOF) {break
}
if err != nil {@@ -115,7 +115,7 @@
// Comments
if ch == '#' || ch == ';' {- if err := p.skipToEOL(); err != nil && err != io.EOF {+ if err := p.skipToEOL(); err != nil && !errors.Is(err, io.EOF) {return nil, err
}
continue
@@ -182,7 +182,7 @@
func (p *configParser) skipBOM() error {first, _, err := p.reader.ReadRune()
- if err == io.EOF {+ if errors.Is(err, io.EOF) {return nil
}
if err != nil {@@ -332,7 +332,7 @@
for {ch, err := p.nextChar()
- if err == io.EOF {+ if errors.Is(err, io.EOF) { cfg.entries = append(cfg.entries, ConfigEntry{Section: p.currentSection,
Subsection: p.currentSubsec,
@@ -356,7 +356,7 @@
}
if ch == '#' || ch == ';' {- if err := p.skipToEOL(); err != nil && err != io.EOF {+ if err := p.skipToEOL(); err != nil && !errors.Is(err, io.EOF) {return err
}
cfg.entries = append(cfg.entries, ConfigEntry{@@ -400,7 +400,7 @@
for {ch, err := p.nextChar()
- if err == io.EOF {+ if errors.Is(err, io.EOF) { if inQuote { return "", errors.New("unexpected EOF in quoted value")}
@@ -448,7 +448,7 @@
if ch == '\\' {next, err := p.nextChar()
- if err == io.EOF {+ if errors.Is(err, io.EOF) { return "", errors.New("unexpected EOF after backslash")}
if err != nil {--- a/config/config_test.go
+++ b/config/config_test.go
@@ -26,7 +26,8 @@
}
func TestConfigAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})testRepo.Run(t, "config", "core.bare", "true")
testRepo.Run(t, "config", "core.filemode", "false")
@@ -57,7 +58,8 @@
}
func TestConfigSubsectionAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})testRepo.Run(t, "config", "remote.origin.url", "https://example.org/repo.git")
testRepo.Run(t, "config", "remote.origin.fetch", "+refs/heads/*:refs/remotes/origin/*")
@@ -80,7 +82,8 @@
}
func TestConfigMultiValueAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})testRepo.Run(t, "config", "--add", "remote.origin.fetch", "+refs/heads/main:refs/remotes/origin/main")
testRepo.Run(t, "config", "--add", "remote.origin.fetch", "+refs/heads/dev:refs/remotes/origin/dev")
@@ -113,7 +116,8 @@
}
func TestConfigCaseInsensitiveAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})testRepo.Run(t, "config", "Core.Bare", "true")
testRepo.Run(t, "config", "CORE.FileMode", "false")
@@ -142,7 +146,8 @@
}
func TestConfigBooleanAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})testRepo.Run(t, "config", "test.flag1", "true")
testRepo.Run(t, "config", "test.flag2", "false")
@@ -176,7 +181,8 @@
}
func TestConfigComplexValuesAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})testRepo.Run(t, "config", "test.spaced", "value with spaces")
testRepo.Run(t, "config", "test.special", "value=with=equals")
@@ -202,7 +208,8 @@
}
func TestConfigEntriesAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})testRepo.Run(t, "config", "core.bare", "true")
testRepo.Run(t, "config", "core.filemode", "false")
@@ -238,6 +245,7 @@
}
func TestConfigErrorCases(t *testing.T) {+ t.Parallel()
tests := []struct {name string
config string
@@ -266,6 +274,7 @@
for _, tt := range tests { t.Run(tt.name, func(t *testing.T) {+ t.Parallel()
r := strings.NewReader(tt.config)
_, err := config.ParseConfig(r)
if err == nil {--- a/diff/diff.go
+++ b/diff/diff.go
@@ -1,3 +1,2 @@
// Package diff encapsulates diff-providing subpackages for direct use.
package diff
-
--- a/diff/lines/diff.go
+++ b/diff/lines/diff.go
@@ -64,11 +64,11 @@
newIDs[i] = id
}
- max := n + m
- offset := max
- trace := make([][]int, 0, max+1)
+ maxDist := n + m
+ offset := maxDist
+ trace := make([][]int, 0, maxDist+1)
- Vprev := make([]int, 2*max+1)
+ Vprev := make([]int, 2*maxDist+1)
for i := range Vprev {Vprev[i] = -1
}
@@ -84,8 +84,8 @@
found := x0 >= n && y0 >= m
- for D := 1; D <= max && !found; D++ {- V := make([]int, 2*max+1)
+ for D := 1; D <= maxDist && !found; D++ {+ V := make([]int, 2*maxDist+1)
for i := range V {V[i] = -1
}
--- a/diff/lines/diff_test.go
+++ b/diff/lines/diff_test.go
@@ -1,4 +1,4 @@
-package lines
+package lines_test
import (
"bytes"
@@ -5,6 +5,8 @@
"strconv"
"strings"
"testing"
+
+ "codeberg.org/lindenii/furgit/diff/lines"
)
func TestDiff(t *testing.T) {@@ -14,20 +16,20 @@
name string
oldInput string
newInput string
- expected []Chunk
+ expected []lines.Chunk
}{ {name: "empty inputs produce no chunks",
oldInput: "",
newInput: "",
- expected: []Chunk{},+ expected: []lines.Chunk{},},
{name: "only additions",
oldInput: "",
newInput: "alpha\nbeta\n",
- expected: []Chunk{- {Kind: ChunkKindAdded, Data: []byte("alpha\nbeta\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindAdded, Data: []byte("alpha\nbeta\n")},},
},
{@@ -34,8 +36,8 @@
name: "only deletions",
oldInput: "alpha\nbeta\n",
newInput: "",
- expected: []Chunk{- {Kind: ChunkKindDeleted, Data: []byte("alpha\nbeta\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindDeleted, Data: []byte("alpha\nbeta\n")},},
},
{@@ -42,8 +44,8 @@
name: "unchanged content is grouped",
oldInput: "same\nlines\n",
newInput: "same\nlines\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("same\nlines\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("same\nlines\n")},},
},
{@@ -50,10 +52,10 @@
name: "insertion in the middle",
oldInput: "a\nb\nc\n",
newInput: "a\nb\nX\nc\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("a\nb\n")},- {Kind: ChunkKindAdded, Data: []byte("X\n")},- {Kind: ChunkKindUnchanged, Data: []byte("c\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("a\nb\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("X\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("c\n")},},
},
{@@ -60,10 +62,10 @@
name: "replacement without trailing newline",
oldInput: "first\nsecond",
newInput: "first\nsecond\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("first\n")},- {Kind: ChunkKindDeleted, Data: []byte("second")},- {Kind: ChunkKindAdded, Data: []byte("second\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("first\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("second")},+ {Kind: lines.ChunkKindAdded, Data: []byte("second\n")},},
},
{@@ -70,11 +72,11 @@
name: "line replacement",
oldInput: "a\nb\nc\n",
newInput: "a\nB\nc\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("a\n")},- {Kind: ChunkKindDeleted, Data: []byte("b\n")},- {Kind: ChunkKindAdded, Data: []byte("B\n")},- {Kind: ChunkKindUnchanged, Data: []byte("c\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("a\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("b\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("B\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("c\n")},},
},
{@@ -81,10 +83,10 @@
name: "swap adjacent lines",
oldInput: "A\nB\n",
newInput: "B\nA\n",
- expected: []Chunk{- {Kind: ChunkKindDeleted, Data: []byte("A\n")},- {Kind: ChunkKindUnchanged, Data: []byte("B\n")},- {Kind: ChunkKindAdded, Data: []byte("A\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindDeleted, Data: []byte("A\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("B\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("A\n")},},
},
{@@ -91,11 +93,11 @@
name: "indentation change is a full line replacement",
oldInput: "func main() {\n\treturn\n}\n", newInput: "func main() {\n return\n}\n",- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("func main() {\n")},- {Kind: ChunkKindDeleted, Data: []byte("\treturn\n")},- {Kind: ChunkKindAdded, Data: []byte(" return\n")},- {Kind: ChunkKindUnchanged, Data: []byte("}\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("func main() {\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("\treturn\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte(" return\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("}\n")},},
},
{@@ -102,9 +104,9 @@
name: "commenting out lines",
oldInput: "code\n",
newInput: "// code\n",
- expected: []Chunk{- {Kind: ChunkKindDeleted, Data: []byte("code\n")},- {Kind: ChunkKindAdded, Data: []byte("// code\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindDeleted, Data: []byte("code\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("// code\n")},},
},
{@@ -111,9 +113,9 @@
name: "reducing repeating lines",
oldInput: "log\nlog\nlog\n",
newInput: "log\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("log\n")},- {Kind: ChunkKindDeleted, Data: []byte("log\nlog\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("log\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("log\nlog\n")},},
},
{@@ -120,9 +122,9 @@
name: "expanding repeating lines",
oldInput: "tick\n",
newInput: "tick\ntick\ntick\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("tick\n")},- {Kind: ChunkKindAdded, Data: []byte("tick\ntick\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("tick\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("tick\ntick\n")},},
},
{@@ -129,13 +131,13 @@
name: "interleaved modifications",
oldInput: "keep\nchange\nkeep\nchange\n",
newInput: "keep\nfixed\nkeep\nfixed\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("keep\n")},- {Kind: ChunkKindDeleted, Data: []byte("change\n")},- {Kind: ChunkKindAdded, Data: []byte("fixed\n")},- {Kind: ChunkKindUnchanged, Data: []byte("keep\n")},- {Kind: ChunkKindDeleted, Data: []byte("change\n")},- {Kind: ChunkKindAdded, Data: []byte("fixed\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("keep\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("change\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("fixed\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("keep\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("change\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("fixed\n")},},
},
{@@ -142,11 +144,11 @@
name: "large common header and footer",
oldInput: "header\nheader\nheader\nOLD\nfooter\nfooter\n",
newInput: "header\nheader\nheader\nNEW\nfooter\nfooter\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("header\nheader\nheader\n")},- {Kind: ChunkKindDeleted, Data: []byte("OLD\n")},- {Kind: ChunkKindAdded, Data: []byte("NEW\n")},- {Kind: ChunkKindUnchanged, Data: []byte("footer\nfooter\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("header\nheader\nheader\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("OLD\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("NEW\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("footer\nfooter\n")},},
},
{@@ -153,9 +155,9 @@
name: "completely different content",
oldInput: "apple\nbanana\n",
newInput: "cherry\ndate\n",
- expected: []Chunk{- {Kind: ChunkKindDeleted, Data: []byte("apple\nbanana\n")},- {Kind: ChunkKindAdded, Data: []byte("cherry\ndate\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindDeleted, Data: []byte("apple\nbanana\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("cherry\ndate\n")},},
},
{@@ -162,10 +164,10 @@
name: "unicode and emoji changes",
oldInput: "Hello 🌍\nYay\n",
newInput: "Hello 🌎\nYay\n",
- expected: []Chunk{- {Kind: ChunkKindDeleted, Data: []byte("Hello 🌍\n")},- {Kind: ChunkKindAdded, Data: []byte("Hello 🌎\n")},- {Kind: ChunkKindUnchanged, Data: []byte("Yay\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindDeleted, Data: []byte("Hello 🌍\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("Hello 🌎\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("Yay\n")},},
},
{@@ -172,10 +174,10 @@
name: "binary data with embedded newlines",
oldInput: "\x00\x01\n\x02\x03\n",
newInput: "\x00\x01\n\x02\xFF\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("\x00\x01\n")},- {Kind: ChunkKindDeleted, Data: []byte("\x02\x03\n")},- {Kind: ChunkKindAdded, Data: []byte("\x02\xFF\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("\x00\x01\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("\x02\x03\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("\x02\xFF\n")},},
},
{@@ -182,10 +184,10 @@
name: "adding trailing newline to last line",
oldInput: "Line 1\nLine 2",
newInput: "Line 1\nLine 2\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("Line 1\n")},- {Kind: ChunkKindDeleted, Data: []byte("Line 2")},- {Kind: ChunkKindAdded, Data: []byte("Line 2\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("Line 1\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("Line 2")},+ {Kind: lines.ChunkKindAdded, Data: []byte("Line 2\n")},},
},
{@@ -192,10 +194,10 @@
name: "removing trailing newline",
oldInput: "A\nB\n",
newInput: "A\nB",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("A\n")},- {Kind: ChunkKindDeleted, Data: []byte("B\n")},- {Kind: ChunkKindAdded, Data: []byte("B")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("A\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("B\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("B")},},
},
{@@ -202,10 +204,10 @@
name: "inserting blank lines",
oldInput: "A\nB\n",
newInput: "A\n\n\nB\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("A\n")},- {Kind: ChunkKindAdded, Data: []byte("\n\n")},- {Kind: ChunkKindUnchanged, Data: []byte("B\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("A\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("\n\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("B\n")},},
},
{@@ -212,10 +214,10 @@
name: "collapsing blank lines",
oldInput: "A\n\n\n\nB\n",
newInput: "A\nB\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("A\n")},- {Kind: ChunkKindDeleted, Data: []byte("\n\n\n")},- {Kind: ChunkKindUnchanged, Data: []byte("B\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("A\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("\n\n\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("B\n")},},
},
{@@ -222,10 +224,10 @@
name: "case sensitivity check",
oldInput: "FOO\nbar\n",
newInput: "foo\nbar\n",
- expected: []Chunk{- {Kind: ChunkKindDeleted, Data: []byte("FOO\n")},- {Kind: ChunkKindAdded, Data: []byte("foo\n")},- {Kind: ChunkKindUnchanged, Data: []byte("bar\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindDeleted, Data: []byte("FOO\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("foo\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("bar\n")},},
},
{@@ -232,9 +234,9 @@
name: "partial line match is full mismatch",
oldInput: "The quick brown fox\n",
newInput: "The quick brown fox jumps\n",
- expected: []Chunk{- {Kind: ChunkKindDeleted, Data: []byte("The quick brown fox\n")},- {Kind: ChunkKindAdded, Data: []byte("The quick brown fox jumps\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindDeleted, Data: []byte("The quick brown fox\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("The quick brown fox jumps\n")},},
},
{@@ -241,10 +243,10 @@
name: "inserting middle content",
oldInput: "Top\nBottom\n",
newInput: "Top\nMiddle\nBottom\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("Top\n")},- {Kind: ChunkKindAdded, Data: []byte("Middle\n")},- {Kind: ChunkKindUnchanged, Data: []byte("Bottom\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("Top\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("Middle\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("Bottom\n")},},
},
{@@ -251,11 +253,11 @@
name: "block move simulated",
oldInput: "BlockA\nBlockB\nBlockC\n",
newInput: "BlockA\nBlockC\nBlockB\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("BlockA\n")},- {Kind: ChunkKindDeleted, Data: []byte("BlockB\n")},- {Kind: ChunkKindUnchanged, Data: []byte("BlockC\n")},- {Kind: ChunkKindAdded, Data: []byte("BlockB\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("BlockA\n")},+ {Kind: lines.ChunkKindDeleted, Data: []byte("BlockB\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("BlockC\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("BlockB\n")},},
},
{@@ -262,12 +264,12 @@
name: "alternating additions",
oldInput: "A\nB\nC\n",
newInput: "A\n1\nB\n2\nC\n",
- expected: []Chunk{- {Kind: ChunkKindUnchanged, Data: []byte("A\n")},- {Kind: ChunkKindAdded, Data: []byte("1\n")},- {Kind: ChunkKindUnchanged, Data: []byte("B\n")},- {Kind: ChunkKindAdded, Data: []byte("2\n")},- {Kind: ChunkKindUnchanged, Data: []byte("C\n")},+ expected: []lines.Chunk{+ {Kind: lines.ChunkKindUnchanged, Data: []byte("A\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("1\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("B\n")},+ {Kind: lines.ChunkKindAdded, Data: []byte("2\n")},+ {Kind: lines.ChunkKindUnchanged, Data: []byte("C\n")},},
},
}
@@ -276,7 +278,7 @@
t.Run(tt.name, func(t *testing.T) {t.Parallel()
- chunks, err := Diff([]byte(tt.oldInput), []byte(tt.newInput))
+ chunks, err := lines.Diff([]byte(tt.oldInput), []byte(tt.newInput))
if err != nil { t.Fatalf("Diff returned error: %v", err)}
@@ -297,7 +299,7 @@
}
}
-func formatChunks(chunks []Chunk) string {+func formatChunks(chunks []lines.Chunk) string {var b strings.Builder
b.WriteByte('[') for i, chunk := range chunks {@@ -312,13 +314,13 @@
return b.String()
}
-func chunkKindName(kind ChunkKind) string {+func chunkKindName(kind lines.ChunkKind) string { switch kind {- case ChunkKindUnchanged:
+ case lines.ChunkKindUnchanged:
return "U"
- case ChunkKindDeleted:
+ case lines.ChunkKindDeleted:
return "D"
- case ChunkKindAdded:
+ case lines.ChunkKindAdded:
return "A"
default:
return "?"
--- a/diff/trees/diff_test.go
+++ b/diff/trees/diff_test.go
@@ -15,7 +15,8 @@
)
func TestDiffComplexNestedChanges(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper repo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: false})writeTestFile(t, filepath.Join(repo.Dir(), "README.md"), "initial readme\n")
@@ -98,7 +99,8 @@
}
func TestDiffDirectoryAddDeleteDeep(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper repo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: false})writeTestFile(t, filepath.Join(repo.Dir(), "old_dir", "old.txt"), "stale directory\n")
--- a/go.mod
+++ b/go.mod
@@ -1,3 +1,3 @@
module codeberg.org/lindenii/furgit
-go 1.26.0
+go 1.25.0
--- a/internal/cache/lru/lru.go
+++ b/internal/cache/lru/lru.go
@@ -90,6 +90,7 @@
return zero, false
}
cache.lru.MoveToBack(elem)
+ //nolint:forcetypeassert
return elem.Value.(*entry[K, V]).value, true
}
@@ -100,6 +101,7 @@
var zero V
return zero, false
}
+ //nolint:forcetypeassert
return elem.Value.(*entry[K, V]).value, true
}
@@ -161,6 +163,7 @@
}
func (cache *Cache[K, V]) removeElem(elem *list.Element) *entry[K, V] {+ //nolint:forcetypeassert
ent := elem.Value.(*entry[K, V])
cache.lru.Remove(elem)
delete(cache.items, ent.key)
--- a/internal/cache/lru/lru_test.go
+++ b/internal/cache/lru/lru_test.go
@@ -169,6 +169,7 @@
t.Parallel()
t.Run("negative max", func(t *testing.T) {+ t.Parallel()
defer func() { if recover() == nil { t.Fatalf("expected panic")@@ -178,6 +179,7 @@
})
t.Run("nil weight function", func(t *testing.T) {+ t.Parallel()
defer func() { if recover() == nil { t.Fatalf("expected panic")@@ -187,6 +189,7 @@
})
t.Run("negative entry weight", func(t *testing.T) {+ t.Parallel()
cache := lru.New[string, testValue](10, func(_ string, _ testValue) int64 {return -1
}, nil)
@@ -199,6 +202,7 @@
})
t.Run("set negative max", func(t *testing.T) {+ t.Parallel()
cache := lru.New[string, testValue](10, weightFn, nil)
defer func() { if recover() == nil {--- /dev/null
+++ b/internal/intconv/intconv.go
@@ -1,0 +1,39 @@
+// Package intconv provides checked integer conversion helpers.
+package intconv
+
+import (
+ "fmt"
+ "math"
+)
+
+// Uint64ToInt converts v to int, returning an error if it overflows.
+func Uint64ToInt(v uint64) (int, error) {+ if v > uint64(math.MaxInt) {+ return 0, fmt.Errorf("intconv: uint64 %d overflows int", v)+ }
+ return int(v), nil
+}
+
+// UintptrToInt converts v to int, returning an error if it overflows.
+func UintptrToInt(v uintptr) (int, error) {+ if v > uintptr(math.MaxInt) {+ return 0, fmt.Errorf("intconv: uintptr %d overflows int", v)+ }
+ return int(v), nil
+}
+
+// IntToUint64 converts v to uint64, returning an error if v is negative.
+func IntToUint64(v int) (uint64, error) {+ if v < 0 {+ return 0, fmt.Errorf("intconv: int %d is negative", v)+ }
+ return uint64(v), nil
+}
+
+// Int64ToInt32 converts v to int32, returning an error if it overflows.
+func Int64ToInt32(v int64) (int32, error) {+ if v < math.MinInt32 || v > math.MaxInt32 {+ return 0, fmt.Errorf("intconv: int64 %d overflows int32", v)+ }
+ return int32(v), nil
+}
--- a/internal/internal.go
+++ b/internal/internal.go
@@ -1,3 +1,2 @@
// Package internal provides private packages and helpers.
package internal
-
--- a/internal/testgit/repo_commit_tree.go
+++ b/internal/testgit/repo_commit_tree.go
@@ -9,7 +9,8 @@
// CommitTree creates a commit from a tree and message, optionally with parents.
func (testRepo *TestRepo) CommitTree(tb testing.TB, tree objectid.ObjectID, message string, parents ...objectid.ObjectID) objectid.ObjectID {tb.Helper()
- args := []string{"commit-tree", tree.String()}+ args := make([]string, 0, 2+2*len(parents)+2)
+ args = append(args, "commit-tree", tree.String())
for _, p := range parents {args = append(args, "-p", p.String())
}
--- a/internal/testgit/repo_new.go
+++ b/internal/testgit/repo_new.go
@@ -26,11 +26,7 @@
tb.Fatalf("invalid algorithm: %v", algo)}
- dir, err := os.MkdirTemp("", "furgit-testgit-*")- if err != nil {- tb.Fatalf("create temp dir: %v", err)- }
- tb.Cleanup(func() { _ = os.RemoveAll(dir) })+ dir := tb.TempDir()
testRepo := &TestRepo{dir: dir,
--- a/internal/testgit/repo_run.go
+++ b/internal/testgit/repo_run.go
@@ -35,7 +35,8 @@
func (testRepo *TestRepo) runBytes(tb testing.TB, stdin []byte, dir string, args ...string) []byte {tb.Helper()
- cmd := exec.Command("git", args...)+ //nolint:noctx
+ cmd := exec.Command("git", args...) //#nosec G204cmd.Dir = dir
cmd.Env = testRepo.env
if stdin != nil {--- a/object/blob_parse_test.go
+++ b/object/blob_parse_test.go
@@ -10,7 +10,8 @@
)
func TestBlobParseFromGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true}) body := []byte("hello\nblob\n")blobID := testRepo.HashObject(t, "blob", body)
--- a/object/blob_serialize_test.go
+++ b/object/blob_serialize_test.go
@@ -9,7 +9,8 @@
)
func TestBlobSerialize(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true}) body := []byte("hello\nblob\n")wantID := testRepo.HashObject(t, "blob", body)
--- a/object/commit_parse_test.go
+++ b/object/commit_parse_test.go
@@ -10,7 +10,8 @@
)
func TestCommitParseFromGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})_, treeID, commitID := testRepo.MakeCommit(t, "subject\n\nbody")
--- a/object/commit_serialize_test.go
+++ b/object/commit_serialize_test.go
@@ -9,7 +9,8 @@
)
func TestCommitSerialize(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})_, _, commitID := testRepo.MakeCommit(t, "subject\n\nbody")
--- a/object/ident.go
+++ b/object/ident.go
@@ -4,10 +4,11 @@
"bytes"
"errors"
"fmt"
- "math"
"strconv"
"strings"
"time"
+
+ "codeberg.org/lindenii/furgit/internal/intconv"
)
// Ident represents a Git identity (author/committer/tagger).
@@ -76,11 +77,10 @@
return nil, errors.New("object: ident: invalid timezone minutes range")}
total := int64(hh)*60 + int64(mm)
- if total > math.MaxInt32 {+ offset, err := intconv.Int64ToInt32(total)
+ if err != nil { return nil, errors.New("object: ident: timezone overflow")}
-
- offset := int32(total)
if sign < 0 {offset = -offset
}
--- a/object/parse.go
+++ b/object/parse.go
@@ -19,6 +19,8 @@
return ParseCommit(body, algo)
case objecttype.TypeTag:
return ParseTag(body, algo)
+ case objecttype.TypeInvalid, objecttype.TypeFuture, objecttype.TypeOfsDelta, objecttype.TypeRefDelta:
+ return nil, fmt.Errorf("object: unsupported object type %d", ty)default:
return nil, fmt.Errorf("object: unsupported object type %d", ty)}
--- a/object/tag_parse_test.go
+++ b/object/tag_parse_test.go
@@ -11,7 +11,8 @@
)
func TestTagParseFromGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})_, _, commitID := testRepo.MakeCommit(t, "subject\n\nbody")
tagID := testRepo.TagAnnotated(t, "v1", commitID, "tag message")
--- a/object/tag_serialize_test.go
+++ b/object/tag_serialize_test.go
@@ -9,7 +9,8 @@
)
func TestTagSerialize(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})_, _, commitID := testRepo.MakeCommit(t, "subject\n\nbody")
tagID := testRepo.TagAnnotated(t, "v1", commitID, "tag message")
--- a/object/tree.go
+++ b/object/tree.go
@@ -49,27 +49,6 @@
return tree.entry(name, false)
}
-func (tree *Tree) entry(name []byte, searchIsTree bool) *TreeEntry {- low, high := 0, len(tree.Entries)-1
- for low <= high {- mid := low + (high-low)/2
- entry := &tree.Entries[mid]
- cmp := TreeEntryNameCompare(entry.Name, entry.Mode, name, searchIsTree)
- if cmp == 0 {- if bytes.Equal(entry.Name, name) {- return entry
- }
- return nil
- }
- if cmp < 0 {- low = mid + 1
- } else {- high = mid - 1
- }
- }
- return nil
-}
-
// InsertEntry inserts a tree entry while preserving Git ordering.
func (tree *Tree) InsertEntry(newEntry TreeEntry) error { if tree.entry(newEntry.Name, true) != nil || tree.entry(newEntry.Name, false) != nil {@@ -100,6 +79,27 @@
return fmt.Errorf("object: tree: entry %q not found", name)}
+func (tree *Tree) entry(name []byte, searchIsTree bool) *TreeEntry {+ low, high := 0, len(tree.Entries)-1
+ for low <= high {+ mid := low + (high-low)/2
+ entry := &tree.Entries[mid]
+ cmp := TreeEntryNameCompare(entry.Name, entry.Mode, name, searchIsTree)
+ if cmp == 0 {+ if bytes.Equal(entry.Name, name) {+ return entry
+ }
+ return nil
+ }
+ if cmp < 0 {+ low = mid + 1
+ } else {+ high = mid - 1
+ }
+ }
+ return nil
+}
+
// TreeEntryNameCompare compares names using Git tree ordering rules.
func TreeEntryNameCompare(entryName []byte, entryMode FileMode, searchName []byte, searchIsTree bool) int {isEntryTree := entryMode == FileModeDir
@@ -115,7 +115,7 @@
n := min(searchLen, entryLen)
- for i := 0; i < n; i++ {+ for i := range n {var ec, sc byte
if i < len(entryName) {ec = entryName[i]
--- a/object/tree_helpers_test.go
+++ b/object/tree_helpers_test.go
@@ -10,30 +10,15 @@
"codeberg.org/lindenii/furgit/object"
)
-func mktreeTypeFromMode(t *testing.T, mode object.FileMode) string {- t.Helper()
- switch mode {- case object.FileModeDir:
- return "tree"
- case object.FileModeRegular, object.FileModeExecutable, object.FileModeSymlink:
- return "blob"
- case object.FileModeGitlink:
- return "commit"
- default:
- t.Fatalf("unsupported file mode: %o", mode)- return ""
- }
-}
-
func buildGitMktreeInput(entries []object.TreeEntry) string {var b strings.Builder
for _, e := range entries {- fmt.Fprintf(&b, "%o %s %s\t%s\n", e.Mode, mktreeTypeFromModeNoTB(e.Mode), e.ID.String(), e.Name)
+ fmt.Fprintf(&b, "%o %s %s\t%s\n", e.Mode, mktreeTypeFromMode(e.Mode), e.ID.String(), e.Name)
}
return b.String()
}
-func mktreeTypeFromModeNoTB(mode object.FileMode) string {+func mktreeTypeFromMode(mode object.FileMode) string { switch mode {case object.FileModeDir:
return "tree"
--- a/object/tree_parse_test.go
+++ b/object/tree_parse_test.go
@@ -10,7 +10,8 @@
)
func TestTreeParseFromGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})entries := adversarialRootEntries(t, testRepo)
inserted := &object.Tree{}--- a/object/tree_serialize_test.go
+++ b/object/tree_serialize_test.go
@@ -9,7 +9,8 @@
)
func TestTreeSerialize(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})entries := adversarialRootEntries(t, testRepo)
tree := &object.Tree{}--- a/objectid/objectid.go
+++ b/objectid/objectid.go
@@ -2,7 +2,7 @@
package objectid
import (
- "crypto/sha1"
+ "crypto/sha1" //#nosec G505
"crypto/sha256"
"encoding/hex"
"errors"
@@ -42,15 +42,13 @@
name: "sha1",
size: sha1.Size,
sum: func(data []byte) ObjectID {- sum := sha1.Sum(data)
+ sum := sha1.Sum(data) //#nosec G401
var id ObjectID
copy(id.data[:], sum[:])
id.algo = AlgorithmSHA1
return id
},
- new: func() hash.Hash {- return sha1.New()
- },
+ new: sha1.New,
},
AlgorithmSHA256: {name: "sha256",
@@ -62,9 +60,7 @@
id.algo = AlgorithmSHA256
return id
},
- new: func() hash.Hash {- return sha256.New()
- },
+ new: sha256.New,
},
}
@@ -72,20 +68,16 @@
var supportedAlgorithms []Algorithm
func init() {- for algo, info := range algorithmTable {+ for algo := Algorithm(0); int(algo) < len(algorithmTable); algo++ {+ info := algorithmTable[algo]
if info.name == "" {continue
}
- parsed := Algorithm(algo)
- algorithmByName[info.name] = parsed
- supportedAlgorithms = append(supportedAlgorithms, parsed)
+ algorithmByName[info.name] = algo
+ supportedAlgorithms = append(supportedAlgorithms, algo)
}
}
-func (algo Algorithm) info() algorithmDetails {- return algorithmTable[algo]
-}
-
// SupportedAlgorithms returns all object ID algorithms supported by furgit.
// Do not mutate.
func SupportedAlgorithms() []Algorithm {@@ -131,7 +123,13 @@
return newFn(), nil
}
+func (algo Algorithm) info() algorithmDetails {+ return algorithmTable[algo]
+}
+
// ObjectID represents a Git object ID.
+//
+//nolint:recvcheck
type ObjectID struct {algo Algorithm
data [maxObjectIDSize]byte
@@ -184,7 +182,7 @@
}
decoded, err := hex.DecodeString(s)
if err != nil {- return id, fmt.Errorf("%w: decode: %v", ErrInvalidObjectID, err)+ return id, fmt.Errorf("%w: decode: %w", ErrInvalidObjectID, err)}
copy(id.data[:], decoded)
id.algo = algo
--- a/objectid/objectid_test.go
+++ b/objectid/objectid_test.go
@@ -47,6 +47,7 @@
for _, tt := range tests { t.Run(tt.name, func(t *testing.T) {+ t.Parallel()
id, err := objectid.ParseHex(tt.algo, tt.hex)
if err != nil { t.Fatalf("ParseHex failed: %v", err)@@ -90,6 +91,7 @@
for _, tt := range tests { t.Run(tt.name, func(t *testing.T) {+ t.Parallel()
if _, err := objectid.ParseHex(tt.algo, tt.hex); err == nil { t.Fatalf("expected ParseHex error")}
--- a/objectstore/loose/read_test.go
+++ b/objectstore/loose/read_test.go
@@ -14,7 +14,8 @@
)
func TestLooseStoreReadAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true}) blobID := testRepo.HashObject(t, "blob", []byte("blob body\n"))_, treeID, commitID := testRepo.MakeCommit(t, "subject\n\nbody")
@@ -93,7 +94,8 @@
}
func TestLooseStoreErrors(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})store := openLooseStore(t, testRepo.Dir(), algo)
@@ -136,6 +138,7 @@
}
func TestLooseStoreNewValidation(t *testing.T) {+ t.Parallel()
root, err := os.OpenRoot(t.TempDir())
if err != nil { t.Fatalf("OpenRoot: %v", err)--- a/objectstore/loose/write_test.go
+++ b/objectstore/loose/write_test.go
@@ -12,7 +12,8 @@
)
func TestLooseStoreWriteWriterContentAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})store := openLooseStore(t, testRepo.Dir(), algo)
@@ -68,7 +69,8 @@
}
func TestLooseStoreWriteWriterFullAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})store := openLooseStore(t, testRepo.Dir(), algo)
@@ -108,11 +110,13 @@
}
func TestLooseStoreWriterValidationErrors(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {- testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})- store := openLooseStore(t, testRepo.Dir(), algo)
-
+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper t.Run("content overflow", func(t *testing.T) {+ t.Parallel()
+ testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})+ store := openLooseStore(t, testRepo.Dir(), algo)
+
writer, finalize, err := store.WriteWriterContent(objecttype.TypeBlob, 1)
if err != nil { t.Fatalf("WriteWriterContent: %v", err)@@ -127,6 +131,10 @@
})
t.Run("content short", func(t *testing.T) {+ t.Parallel()
+ testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})+ store := openLooseStore(t, testRepo.Dir(), algo)
+
writer, finalize, err := store.WriteWriterContent(objecttype.TypeBlob, 5)
if err != nil { t.Fatalf("WriteWriterContent: %v", err)@@ -143,6 +151,10 @@
})
t.Run("full malformed header", func(t *testing.T) {+ t.Parallel()
+ testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})+ store := openLooseStore(t, testRepo.Dir(), algo)
+
writer, finalize, err := store.WriteWriterFull()
if err != nil { t.Fatalf("WriteWriterFull: %v", err)@@ -159,6 +171,10 @@
})
t.Run("full size mismatch", func(t *testing.T) {+ t.Parallel()
+ testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})+ store := openLooseStore(t, testRepo.Dir(), algo)
+
writer, finalize, err := store.WriteWriterFull()
if err != nil { t.Fatalf("WriteWriterFull: %v", err)--- a/objectstore/loose/write_writer.go
+++ b/objectstore/loose/write_writer.go
@@ -137,48 +137,6 @@
return len(src), nil
}
-// acceptFull validates and accounts raw full-object input.
-func (writer *streamWriter) acceptFull(src []byte) error {- if !writer.headerDone {- if nul := bytes.IndexByte(src, 0); nul >= 0 {- headerChunkLen := nul + 1
- writer.headerBuf = append(writer.headerBuf, src[:headerChunkLen]...)
- _, size, _, ok := objectheader.Parse(writer.headerBuf)
- if !ok {- return errors.New("objectstore/loose: malformed object header")- }
- writer.headerDone = true
- writer.expectedContentLeft = size
- return writer.acceptContent(int64(len(src) - headerChunkLen))
- }
-
- writer.headerBuf = append(writer.headerBuf, src...)
- return nil
- }
-
- return writer.acceptContent(int64(len(src)))
-}
-
-// acceptContent validates and accounts content byte counts.
-func (writer *streamWriter) acceptContent(n int64) error {- if n > writer.expectedContentLeft {- return errors.New("objectstore/loose: object content exceeds declared size")- }
- writer.expectedContentLeft -= n
- return nil
-}
-
-// writeRawChunk forwards raw bytes to the hash and deflate pipeline.
-func (writer *streamWriter) writeRawChunk(src []byte) error {- if _, err := writer.hash.Write(src); err != nil {- return err
- }
- if _, err := writer.zw.Write(src); err != nil {- return err
- }
- return nil
-}
-
// Close flushes and closes the underlying zlib stream and temp file.
// It is safe to call multiple times.
func (writer *streamWriter) Close() error {@@ -261,6 +219,48 @@
writer.finalID = id
cleanup = false
return id, nil
+}
+
+// acceptFull validates and accounts raw full-object input.
+func (writer *streamWriter) acceptFull(src []byte) error {+ if !writer.headerDone {+ if nul := bytes.IndexByte(src, 0); nul >= 0 {+ headerChunkLen := nul + 1
+ writer.headerBuf = append(writer.headerBuf, src[:headerChunkLen]...)
+ _, size, _, ok := objectheader.Parse(writer.headerBuf)
+ if !ok {+ return errors.New("objectstore/loose: malformed object header")+ }
+ writer.headerDone = true
+ writer.expectedContentLeft = size
+ return writer.acceptContent(int64(len(src) - headerChunkLen))
+ }
+
+ writer.headerBuf = append(writer.headerBuf, src...)
+ return nil
+ }
+
+ return writer.acceptContent(int64(len(src)))
+}
+
+// acceptContent validates and accounts content byte counts.
+func (writer *streamWriter) acceptContent(n int64) error {+ if n > writer.expectedContentLeft {+ return errors.New("objectstore/loose: object content exceeds declared size")+ }
+ writer.expectedContentLeft -= n
+ return nil
+}
+
+// writeRawChunk forwards raw bytes to the hash and deflate pipeline.
+func (writer *streamWriter) writeRawChunk(src []byte) error {+ if _, err := writer.hash.Write(src); err != nil {+ return err
+ }
+ if _, err := writer.zw.Write(src); err != nil {+ return err
+ }
+ return nil
}
// createTempObjectFile creates a unique temporary object file within dir.
--- a/objectstore/objectstore.go
+++ b/objectstore/objectstore.go
@@ -10,7 +10,7 @@
)
// ErrObjectNotFound indicates that an object does not exist in a backend.
-// TODO: This might need to be an interface or otherwise be able to encapsulate multiple concrete backends'
+// TODO: This might need to be an interface or otherwise be able to encapsulate multiple concrete backends'.
var ErrObjectNotFound = errors.New("objectstore: object not found")// Store reads Git objects by object ID.
--- a/objectstore/packed/delta_plan.go
+++ b/objectstore/packed/delta_plan.go
@@ -74,6 +74,10 @@
packName: current.packName,
offset: meta.baseOfs,
}
+ case objecttype.TypeCommit, objecttype.TypeTree, objecttype.TypeBlob, objecttype.TypeTag:
+ return deltaPlan{}, fmt.Errorf("objectstore/packed: internal invariant violation for base type %d", meta.ty)+ case objecttype.TypeInvalid, objecttype.TypeFuture:
+ return deltaPlan{}, fmt.Errorf("objectstore/packed: unsupported pack type %d", meta.ty)default:
return deltaPlan{}, fmt.Errorf("objectstore/packed: unsupported pack type %d", meta.ty)}
--- a/objectstore/packed/entry_parse.go
+++ b/objectstore/packed/entry_parse.go
@@ -3,6 +3,7 @@
import (
"fmt"
+ "codeberg.org/lindenii/furgit/internal/intconv"
"codeberg.org/lindenii/furgit/objectid"
"codeberg.org/lindenii/furgit/objecttype"
)
@@ -28,7 +29,10 @@
return zero, fmt.Errorf("objectstore/packed: pack %q offset %d out of bounds", pack.name, offset)}
- pos := int(offset)
+ pos, err := intconv.Uint64ToInt(offset)
+ if err != nil {+ return zero, fmt.Errorf("objectstore/packed: pack %q offset conversion: %w", pack.name, err)+ }
first := pack.data[pos]
pos++
@@ -76,6 +80,8 @@
return zero, fmt.Errorf("objectstore/packed: pack %q has invalid ofs-delta base", pack.name)}
meta.baseOfs = offset - dist
+ case objecttype.TypeInvalid, objecttype.TypeFuture:
+ return zero, fmt.Errorf("objectstore/packed: pack %q has unsupported object type %d", pack.name, meta.ty)default:
return zero, fmt.Errorf("objectstore/packed: pack %q has unsupported object type %d", pack.name, meta.ty)}
@@ -111,6 +117,8 @@
switch ty {case objecttype.TypeCommit, objecttype.TypeTree, objecttype.TypeBlob, objecttype.TypeTag:
return true
+ case objecttype.TypeInvalid, objecttype.TypeFuture, objecttype.TypeOfsDelta, objecttype.TypeRefDelta:
+ return false
default:
return false
}
--- a/objectstore/packed/idx_load.go
+++ b/objectstore/packed/idx_load.go
@@ -7,6 +7,7 @@
"strings"
"syscall"
+ "codeberg.org/lindenii/furgit/internal/intconv"
"codeberg.org/lindenii/furgit/objectid"
)
@@ -106,7 +107,12 @@
_ = file.Close()
return nil, fmt.Errorf("objectstore/packed: idx %q has unsupported size", idxName)}
- data, err := syscall.Mmap(int(file.Fd()), 0, int(size), syscall.PROT_READ, syscall.MAP_PRIVATE)
+ fd, err := intconv.UintptrToInt(file.Fd())
+ if err != nil {+ _ = file.Close()
+ return nil, err
+ }
+ data, err := syscall.Mmap(fd, 0, int(size), syscall.PROT_READ, syscall.MAP_PRIVATE)
if err != nil {_ = file.Close()
return nil, err
--- a/objectstore/packed/idx_parse.go
+++ b/objectstore/packed/idx_parse.go
@@ -62,10 +62,7 @@
return fmt.Errorf("objectstore/packed: idx %q has malformed 64-bit offset table", index.idxName)}
index.offset64Count = offset64Bytes / 8
- maxOffset64Count := index.numObjects - 1
- if maxOffset64Count < 0 {- maxOffset64Count = 0
- }
+ maxOffset64Count := max(index.numObjects-1, 0)
if index.offset64Count > maxOffset64Count { return fmt.Errorf("objectstore/packed: idx %q has oversized 64-bit offset table", index.idxName)}
--- a/objectstore/packed/pack.go
+++ b/objectstore/packed/pack.go
@@ -5,6 +5,8 @@
"fmt"
"os"
"syscall"
+
+ "codeberg.org/lindenii/furgit/internal/intconv"
)
const packSignature = 0x5041434b
@@ -27,7 +29,11 @@
if size > int64(int(^uint(0)>>1)) { return nil, fmt.Errorf("objectstore/packed: pack %q has unsupported size", name)}
- data, err := syscall.Mmap(int(file.Fd()), 0, int(size), syscall.PROT_READ, syscall.MAP_PRIVATE)
+ fd, err := intconv.UintptrToInt(file.Fd())
+ if err != nil {+ return nil, err
+ }
+ data, err := syscall.Mmap(fd, 0, int(size), syscall.PROT_READ, syscall.MAP_PRIVATE)
if err != nil {return nil, err
}
--- a/objectstore/packed/read_test.go
+++ b/objectstore/packed/read_test.go
@@ -14,12 +14,12 @@
)
func TestPackedStoreReadAgainstGit(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelpertestRepo, ids := createPackedFixtureRepo(t, algo)
store := openPackedStore(t, testRepo.Dir(), algo)
for _, id := range ids {- id := id
t.Run(id.String(), func(t *testing.T) {wantType, wantBody, wantRaw := expectedRawObject(t, testRepo, id)
@@ -80,7 +80,8 @@
}
func TestPackedStoreErrors(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelpertestRepo, _ := createPackedFixtureRepo(t, algo)
store := openPackedStore(t, testRepo.Dir(), algo)
@@ -125,6 +126,7 @@
}
func TestPackedStoreNewValidation(t *testing.T) {+ t.Parallel()
testRepo, _ := createPackedFixtureRepo(t, objectid.AlgorithmSHA1)
store := openPackedStore(t, testRepo.Dir(), objectid.AlgorithmSHA1)
if err := store.Close(); err != nil {@@ -136,6 +138,7 @@
}
func TestPackedStoreInvalidAlgorithm(t *testing.T) {+ t.Parallel()
testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: objectid.AlgorithmSHA1, Bare: true})root, err := os.OpenRoot(testRepo.Dir())
if err != nil {--- a/objecttype/objecttype.go
+++ b/objecttype/objecttype.go
@@ -49,6 +49,8 @@
return typeNameCommit, true
case TypeTag:
return typeNameTag, true
+ case TypeInvalid, TypeFuture, TypeOfsDelta, TypeRefDelta:
+ return "", false
default:
return "", false
}
--- a/refstore/loose/loose_test.go
+++ b/refstore/loose/loose_test.go
@@ -30,7 +30,8 @@
}
func TestLooseResolveAndResolveFully(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})_, _, commitID := testRepo.MakeCommit(t, "loose refs commit")
testRepo.UpdateRef(t, "refs/heads/main", commitID)
@@ -77,7 +78,8 @@
}
func TestLooseResolveFullyCycle(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})testRepo.SymbolicRef(t, "refs/heads/a", "refs/heads/b")
testRepo.SymbolicRef(t, "refs/heads/b", "refs/heads/a")
@@ -90,7 +92,8 @@
}
func TestLooseListPattern(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})_, _, commitID := testRepo.MakeCommit(t, "list refs commit")
testRepo.UpdateRef(t, "refs/heads/main", commitID)
@@ -131,7 +134,8 @@
}
func TestLooseMalformedDetachedRef(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})refPath := filepath.Join(testRepo.Dir(), "refs", "heads", "bad")
if err := os.MkdirAll(filepath.Dir(refPath), 0o755); err != nil {@@ -149,7 +153,8 @@
}
func TestLooseShorten(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})_, _, commitID := testRepo.MakeCommit(t, "shorten refs commit")
testRepo.UpdateRef(t, "refs/heads/main", commitID)
--- a/refstore/packed/packed_test.go
+++ b/refstore/packed/packed_test.go
@@ -17,7 +17,7 @@
func openPackedRefStoreFromRepo(t *testing.T, repoPath string, algo objectid.Algorithm) *packed.Store {t.Helper()
- file, err := os.Open(filepath.Join(repoPath, "packed-refs"))
+ file, err := os.Open(filepath.Join(repoPath, "packed-refs")) //#nosec G304
if err != nil { t.Fatalf("open packed-refs: %v", err)}
@@ -31,7 +31,8 @@
}
func TestPackedResolveAndPeeled(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})_, _, commitID := testRepo.MakeCommit(t, "packed refs commit")
testRepo.UpdateRef(t, "refs/heads/main", commitID)
@@ -85,7 +86,8 @@
}
func TestPackedListAndShorten(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper testRepo := testgit.NewRepo(t, testgit.RepoOptions{ObjectFormat: algo, Bare: true})_, _, commitID := testRepo.MakeCommit(t, "packed refs list commit")
testRepo.UpdateRef(t, "refs/heads/main", commitID)
@@ -132,7 +134,8 @@
}
func TestPackedParseErrors(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper cases := []struct {name string
data string
@@ -163,6 +166,7 @@
}
func TestPackedNewValidation(t *testing.T) {+ t.Parallel()
if _, err := packed.New(bytes.NewReader(nil), objectid.AlgorithmUnknown); !errors.Is(err, objectid.ErrInvalidAlgorithm) { t.Fatalf("packed.New invalid algorithm error = %v", err)}
--- a/refstore/refstore.go
+++ b/refstore/refstore.go
@@ -8,7 +8,7 @@
)
// ErrReferenceNotFound indicates that a reference does not exist in a backend.
-// TODO: interface error? just like object not found in objectstore
+// TODO: Interface error? Just like object not found in objectstore.
var ErrReferenceNotFound = errors.New("refstore: reference not found")// Store reads Git references.
--- a/refstore/reftable/lookup.go
+++ b/refstore/reftable/lookup.go
@@ -5,6 +5,7 @@
"fmt"
"strings"
+ "codeberg.org/lindenii/furgit/internal/intconv"
"codeberg.org/lindenii/furgit/objectid"
)
@@ -11,10 +12,14 @@
// resolveRecord resolves one ref name inside a single table file.
func (table *tableFile) resolveRecord(name string) (recordValue, bool, error) { if table.refIndexPos != 0 {- pos, ok, err := table.resolveRefBlockPosFromIndex(name, int(table.refIndexPos))
+ indexPos, err := intconv.Uint64ToInt(table.refIndexPos)
if err != nil { return recordValue{}, false, err}
+ pos, ok, err := table.resolveRefBlockPosFromIndex(name, indexPos)
+ if err != nil {+ return recordValue{}, false, err+ }
if !ok { return recordValue{}, false, nil}
@@ -204,10 +209,11 @@
return 0, false, err
}
if strings.Compare(key, name) <= 0 {- if childPos > uint64(int(^uint(0)>>1)) {- return 0, false, fmt.Errorf("index child position overflows int")+ childPosInt, err := intconv.Uint64ToInt(childPos)
+ if err != nil {+ return 0, false, fmt.Errorf("index child position conversion: %w", err)}
- return int(childPos), true, nil
+ return childPosInt, true, nil
}
prev = name
off = nextOff
@@ -309,7 +315,7 @@
if restartsStart < 4 { return 0, 0, nil, fmt.Errorf("invalid restart table")}
- for i := 0; i < restartCount; i++ {+ for i := range restartCount {off := restartsStart + i*3
rel := int(readUint24(block.payload[off : off+3]))
base := block.start
@@ -357,14 +363,18 @@
if err != nil {return "", 0, 0, err
}
- suffixLen := int(suffixAndType >> 3)
- if suffixLen < 0 || next+suffixLen > end {+ suffixLen, err := intconv.Uint64ToInt(suffixAndType >> 3)
+ if err != nil || suffixLen < 0 || next+suffixLen > end { return "", 0, 0, fmt.Errorf("invalid suffix length")}
- if int(prefixLen) > len(prev) {+ prefixLenInt, err := intconv.Uint64ToInt(prefixLen)
+ if err != nil { return "", 0, 0, fmt.Errorf("invalid prefix length")}
- name = prev[:prefixLen] + string(buf[next:next+suffixLen])
+ if prefixLenInt > len(prev) {+ return "", 0, 0, fmt.Errorf("invalid prefix length")+ }
+ name = prev[:prefixLenInt] + string(buf[next:next+suffixLen])
next += suffixLen
if prev != "" && strings.Compare(name, prev) <= 0 { return "", 0, 0, fmt.Errorf("keys not strictly increasing")@@ -399,11 +409,23 @@
if err != nil { return recordValue{}, 0, err}
- if targetLen > uint64(end-next) {+ remaining := end - next
+ if remaining < 0 { return recordValue{}, 0, fmt.Errorf("invalid symref target length")}
- target := string(buf[next : next+int(targetLen)])
- next += int(targetLen)
+ remainingU64, err := intconv.IntToUint64(remaining)
+ if err != nil {+ return recordValue{}, 0, fmt.Errorf("invalid symref target length")+ }
+ if targetLen > remainingU64 {+ return recordValue{}, 0, fmt.Errorf("invalid symref target length")+ }
+ targetLenInt, err := intconv.Uint64ToInt(targetLen)
+ if err != nil {+ return recordValue{}, 0, fmt.Errorf("invalid symref target length")+ }
+ target := string(buf[next : next+targetLenInt])
+ next += targetLenInt
return recordValue{symbolicTarget: target}, next, nildefault:
return recordValue{}, 0, fmt.Errorf("unsupported ref value type %d", valueType)--- a/refstore/reftable/reftable_test.go
+++ b/refstore/reftable/reftable_test.go
@@ -40,7 +40,8 @@
}
func TestResolveAndResolveFully(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelperrepo := newBareReftableRepo(t, algo)
_, _, id := repo.MakeCommit(t, "resolve")
repo.UpdateRef(t, "refs/heads/main", id)
@@ -74,7 +75,8 @@
}
func TestResolveFullyCycle(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelperrepo := newBareReftableRepo(t, algo)
repo.SymbolicRef(t, "refs/heads/a", "refs/heads/b")
repo.SymbolicRef(t, "refs/heads/b", "refs/heads/a")
@@ -87,7 +89,8 @@
}
func TestListAndShorten(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelperrepo := newBareReftableRepo(t, algo)
_, _, id := repo.MakeCommit(t, "list")
repo.UpdateRef(t, "refs/heads/main", id)
@@ -133,7 +136,8 @@
}
func TestTombstoneNewestWins(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelperrepo := newBareReftableRepo(t, algo)
_, _, oldID := repo.MakeCommit(t, "old")
repo.UpdateRef(t, "refs/heads/main", oldID)
@@ -149,7 +153,8 @@
}
func TestAnnotatedTagPeeled(t *testing.T) {- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) {+ t.Parallel()
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelperrepo := newBareReftableRepo(t, algo)
_, _, commitID := repo.MakeCommit(t, "tagged")
tagID := repo.TagAnnotated(t, "v1.0.0", commitID, "annotated")
--- a/refstore/reftable/table.go
+++ b/refstore/reftable/table.go
@@ -8,6 +8,7 @@
"os"
"syscall"
+ "codeberg.org/lindenii/furgit/internal/intconv"
"codeberg.org/lindenii/furgit/objectid"
"codeberg.org/lindenii/furgit/ref"
)
@@ -80,11 +81,16 @@
_ = file.Close()
return nil, fmt.Errorf("refstore/reftable: table %q has unsupported size", name)}
- data, err := syscall.Mmap(int(file.Fd()), 0, int(size), syscall.PROT_READ, syscall.MAP_PRIVATE)
+ fd, err := intconv.UintptrToInt(file.Fd())
if err != nil {_ = file.Close()
return nil, err
}
+ data, err := syscall.Mmap(fd, 0, int(size), syscall.PROT_READ, syscall.MAP_PRIVATE)
+ if err != nil {+ _ = file.Close()
+ return nil, err
+ }
out := &tableFile{name: name, algo: algo, file: file, data: data} if err := out.parseMeta(); err != nil {_ = out.close()
@@ -178,7 +184,10 @@
_ = objIndexPos
_ = logIndexPos
- refEnd := uint64(footerStart)
+ refEnd, err := intconv.IntToUint64(footerStart)
+ if err != nil {+ return fmt.Errorf("refstore/reftable: table %q: invalid footer offset: %w", table.name, err)+ }
if table.refIndexPos != 0 && table.refIndexPos < refEnd {refEnd = table.refIndexPos
}
@@ -188,13 +197,25 @@
if logPos != 0 && logPos < refEnd {refEnd = logPos
}
- if refEnd < uint64(table.headerLen) || refEnd > uint64(len(table.data)) {+ headerLenU64, err := intconv.IntToUint64(table.headerLen)
+ if err != nil {+ return fmt.Errorf("refstore/reftable: table %q: invalid header length: %w", table.name, err)+ }
+ dataLenU64, err := intconv.IntToUint64(len(table.data))
+ if err != nil {+ return fmt.Errorf("refstore/reftable: table %q: invalid data length: %w", table.name, err)+ }
+ if refEnd < headerLenU64 || refEnd > dataLenU64 { return fmt.Errorf("refstore/reftable: table %q: invalid ref section", table.name)}
- if table.refIndexPos > uint64(len(table.data)) {+ if table.refIndexPos > dataLenU64 { return fmt.Errorf("refstore/reftable: table %q: invalid ref index position", table.name)}
- table.refEnd = int(refEnd)
+ refEndInt, err := intconv.Uint64ToInt(refEnd)
+ if err != nil {+ return fmt.Errorf("refstore/reftable: table %q: invalid ref section end: %w", table.name, err)+ }
+ table.refEnd = refEndInt
return nil
}
--- a/refstore/shorten_test.go
+++ b/refstore/shorten_test.go
@@ -10,6 +10,7 @@
t.Parallel()
t.Run("simple", func(t *testing.T) {+ t.Parallel()
got := refstore.ShortenName("refs/heads/main", []string{"refs/heads/main"}) if got != "main" { t.Fatalf("ShortenName simple = %q, want %q", got, "main")@@ -17,6 +18,7 @@
})
t.Run("ambiguous with tags", func(t *testing.T) {+ t.Parallel()
got := refstore.ShortenName(
"refs/heads/main",
[]string{@@ -30,6 +32,7 @@
})
t.Run("strict remote head ambiguity", func(t *testing.T) {+ t.Parallel()
// In strict mode, refs/remotes/%s/HEAD blocks shortening to "%s".
got := refstore.ShortenName(
"refs/heads/main",
@@ -44,6 +47,7 @@
})
t.Run("deep fallback still shortens", func(t *testing.T) {+ t.Parallel()
// refs/remotes/origin/main conflicts with refs/heads/origin/main for
// "origin/main", so it should fall back to "remotes/origin/main".
got := refstore.ShortenName(
@@ -59,6 +63,7 @@
})
t.Run("refs-prefix fallback", func(t *testing.T) {+ t.Parallel()
name := "refs/notes/review/topic"
got := refstore.ShortenName(name, []string{name}) if got != "notes/review/topic" {--
⑨