ref: 73e602a5c2c766caba59948e91c11122653705ec
parent: af08c84539f9353718604988ba27ae3c466860fc
author: Runxi Yu <me@runxiyu.org>
date: Tue Mar 10 09:30:48 EDT 2026
commitgraph: Move out of format/
--- a/ancestor/ancestor.go
+++ b/ancestor/ancestor.go
@@ -2,7 +2,7 @@
package ancestor
import (
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/internal/commitquery"
"codeberg.org/lindenii/furgit/internal/peel"
"codeberg.org/lindenii/furgit/objectid"
--- /dev/null
+++ b/commitgraph/TODO
@@ -1,0 +1,6 @@
+Paranoia mode
+Split commit-graph chain with mixed generation and bloom setting
+Separate chunk parsing layer
+Config stuff
+
+Writing
--- /dev/null
+++ b/commitgraph/bloom/bloom.go
@@ -1,0 +1,3 @@
+// Package bloom provides a bloom filter implementation used for changed-path
+// filters in Git commit graphs.
+package bloom
--- /dev/null
+++ b/commitgraph/bloom/constants.go
@@ -1,0 +1,8 @@
+package bloom
+
+const (
+ // DataHeaderSize is the size of the BDAT header in commit-graph files.
+ DataHeaderSize = 3 * 4
+ // DefaultMaxChange matches Git's default max-changed-paths behavior.
+ DefaultMaxChange = 512
+)
--- /dev/null
+++ b/commitgraph/bloom/contain.go
@@ -1,0 +1,25 @@
+package bloom
+
+// MightContain reports whether the Bloom filter may contain the given path.
+//
+// Evaluated against the full path and each of its directory prefixes. A true
+// result indicates a possible match; false means the path definitely did not
+// change.
+func (f *Filter) MightContain(path []byte) (bool, error) {
+	if len(f.Data) == 0 {
+		return false, nil
+ }
+
+ keys, err := keyvec(path, f)
+ if err != nil {+ return false, err
+ }
+
+	for i := range keys {
+		if filterContainsKey(f, keys[i]) {
+			return true, nil
+ }
+ }
+
+ return false, nil
+}
--- /dev/null
+++ b/commitgraph/bloom/errors.go
@@ -1,0 +1,5 @@
+package bloom
+
+import "errors"
+
+var ErrInvalid = errors.New("bloom: invalid data")
--- /dev/null
+++ b/commitgraph/bloom/filter.go
@@ -1,0 +1,28 @@
+package bloom
+
+// Filter represents a changed-paths Bloom filter associated with a commit.
+//
+// The filter encodes which paths changed between a commit and its first
+// parent. Paths are expected to be in Git's slash-separated form and
+// are queried using a path and its prefixes (e.g. "a/b/c", "a/b", "a").
+type Filter struct {
+	Data []byte
+
+ HashVersion uint32
+ NumHashes uint32
+ BitsPerEntry uint32
+ MaxChangePaths uint32
+}
+
+// NewFilter constructs one query-ready bloom filter from raw data/settings.
+func NewFilter(data []byte, settings Settings) *Filter {
+	out := &Filter{
+		Data:           data,
+ HashVersion: settings.HashVersion,
+ NumHashes: settings.NumHashes,
+ BitsPerEntry: settings.BitsPerEntry,
+ MaxChangePaths: settings.MaxChangePaths,
+ }
+
+ return out
+}
--- /dev/null
+++ b/commitgraph/bloom/key.go
@@ -1,0 +1,117 @@
+package bloom
+
+import "codeberg.org/lindenii/furgit/internal/intconv"
+
+type key struct {
+	hashes []uint32
+}
+
+func keyvec(path []byte, filter *Filter) ([]key, error) {
+	if len(path) == 0 {
+		return nil, nil
+ }
+
+ count := 1
+
+	for _, b := range path {
+		if b == '/' {
+			count++
+ }
+ }
+
+ keys := make([]key, 0, count)
+
+ full, err := keyFill(path, filter)
+	if err != nil {
+		return nil, err
+ }
+
+ keys = append(keys, full)
+
+	for i := len(path) - 1; i >= 0; i-- {
+		if path[i] == '/' {
+			k, err := keyFill(path[:i], filter)
+			if err != nil {
+				return nil, err
+ }
+
+ keys = append(keys, k)
+ }
+ }
+
+ return keys, nil
+}
+
+func keyFill(path []byte, filter *Filter) (key, error) {
+	const (
+ seed0 = 0x293ae76f
+ seed1 = 0x7e646e2c
+ )
+
+ var (
+ h0 uint32
+ h1 uint32
+ err error
+ )
+
+	switch filter.HashVersion {
+	case 2:
+ h0, err = murmur3SeededV2(seed0, path)
+		if err != nil {
+			return key{}, err
+		}
+
+ h1, err = murmur3SeededV2(seed1, path)
+		if err != nil {
+			return key{}, err
+		}
+ case 1:
+ h0, err = murmur3SeededV1(seed0, path)
+		if err != nil {
+			return key{}, err
+		}
+
+ h1, err = murmur3SeededV1(seed1, path)
+		if err != nil {
+			return key{}, err
+		}
+ default:
+		return key{}, ErrInvalid
+	}
+
+ hashCount, err := intconv.Uint32ToInt(filter.NumHashes)
+	if err != nil {
+		return key{}, ErrInvalid
+	}
+
+ hashes := make([]uint32, hashCount)
+ for i := range hashCount {+ iU32, err := intconv.IntToUint32(i)
+ if err != nil {+ return key{}, ErrInvalid+ }
+
+ hashes[i] = h0 + iU32*h1
+ }
+
+	return key{hashes: hashes}, nil
+}
+
+func filterContainsKey(filter *Filter, key key) bool {
+	if len(filter.Data) == 0 {
+		return false
+ }
+
+ mod := uint64(len(filter.Data)) * 8
+	for _, h := range key.hashes {
+		idx := uint64(h) % mod
+ bytePos := idx / 8
+
+ bit := byte(1 << (idx & 7))
+ if filter.Data[bytePos]&bit == 0 {+ return false
+ }
+ }
+
+ return true
+}
--- /dev/null
+++ b/commitgraph/bloom/murmur.go
@@ -1,0 +1,127 @@
+package bloom
+
+import "codeberg.org/lindenii/furgit/internal/intconv"
+
+func murmur3SeededV2(seed uint32, data []byte) (uint32, error) {
+	const (
+ c1 = 0xcc9e2d51
+ c2 = 0x1b873593
+ r1 = 15
+ r2 = 13
+ m = 5
+ n = 0xe6546b64
+ )
+
+ h := seed
+
+ nblocks := len(data) / 4
+	for i := range nblocks {
+		k := uint32(data[4*i]) |
+ (uint32(data[4*i+1]) << 8) |
+ (uint32(data[4*i+2]) << 16) |
+ (uint32(data[4*i+3]) << 24)
+ k *= c1
+ k = (k << r1) | (k >> (32 - r1))
+ k *= c2
+
+ h ^= k
+ h = (h << r2) | (h >> (32 - r2))
+ h = h*m + n
+ }
+
+ var k1 uint32
+
+ tail := data[nblocks*4:]
+	switch len(tail) & 3 {
+	case 3:
+ k1 ^= uint32(tail[2]) << 16
+
+ fallthrough
+ case 2:
+ k1 ^= uint32(tail[1]) << 8
+
+ fallthrough
+ case 1:
+ k1 ^= uint32(tail[0])
+ k1 *= c1
+ k1 = (k1 << r1) | (k1 >> (32 - r1))
+ k1 *= c2
+ h ^= k1
+ }
+
+ dataLen, err := intconv.IntToUint32(len(data))
+	if err != nil {
+		return 0, err
+ }
+
+ h ^= dataLen
+ h ^= h >> 16
+ h *= 0x85ebca6b
+ h ^= h >> 13
+ h *= 0xc2b2ae35
+ h ^= h >> 16
+
+ return h, nil
+}
+
+func murmur3SeededV1(seed uint32, data []byte) (uint32, error) {
+	const (
+ c1 = 0xcc9e2d51
+ c2 = 0x1b873593
+ r1 = 15
+ r2 = 13
+ m = 5
+ n = 0xe6546b64
+ )
+
+ h := seed
+
+ nblocks := len(data) / 4
+	for i := range nblocks {
+		k := intconv.SignExtendByteToUint32(data[4*i]) |
+ (intconv.SignExtendByteToUint32(data[4*i+1]) << 8) |
+ (intconv.SignExtendByteToUint32(data[4*i+2]) << 16) |
+ (intconv.SignExtendByteToUint32(data[4*i+3]) << 24)
+ k *= c1
+ k = (k << r1) | (k >> (32 - r1))
+ k *= c2
+
+ h ^= k
+ h = (h << r2) | (h >> (32 - r2))
+ h = h*m + n
+ }
+
+ var k1 uint32
+
+ tail := data[nblocks*4:]
+	switch len(tail) & 3 {
+	case 3:
+ k1 ^= intconv.SignExtendByteToUint32(tail[2]) << 16
+
+ fallthrough
+ case 2:
+ k1 ^= intconv.SignExtendByteToUint32(tail[1]) << 8
+
+ fallthrough
+ case 1:
+ k1 ^= intconv.SignExtendByteToUint32(tail[0])
+ k1 *= c1
+ k1 = (k1 << r1) | (k1 >> (32 - r1))
+ k1 *= c2
+ h ^= k1
+ }
+
+ dataLen, err := intconv.IntToUint32(len(data))
+	if err != nil {
+		return 0, err
+ }
+
+ h ^= dataLen
+ h ^= h >> 16
+ h *= 0x85ebca6b
+ h ^= h >> 13
+ h *= 0xc2b2ae35
+ h ^= h >> 16
+
+ return h, nil
+}
--- /dev/null
+++ b/commitgraph/bloom/settings.go
@@ -1,0 +1,50 @@
+package bloom
+
+import (
+ "encoding/binary"
+
+ "codeberg.org/lindenii/furgit/internal/intconv"
+)
+
+// Settings describe the changed-paths Bloom filter parameters stored in
+// commit-graph BDAT chunks.
+//
+// Obviously, they must match the repository's commit-graph settings to
+// interpret filters correctly.
+type Settings struct {
+	HashVersion    uint32
+ NumHashes uint32
+ BitsPerEntry uint32
+ MaxChangePaths uint32
+}
+
+// ParseSettings reads Bloom filter settings from a BDAT chunk header.
+func ParseSettings(bdat []byte) (*Settings, error) {
+	if len(bdat) < DataHeaderSize {
+		return nil, ErrInvalid
+ }
+
+	settings := &Settings{
+		HashVersion:    binary.BigEndian.Uint32(bdat[0:4]),
+ NumHashes: binary.BigEndian.Uint32(bdat[4:8]),
+ BitsPerEntry: binary.BigEndian.Uint32(bdat[8:12]),
+ MaxChangePaths: DefaultMaxChange,
+ }
+
+	switch settings.HashVersion {
+	case 1, 2:
+ default:
+ return nil, ErrInvalid
+ }
+
+	if settings.NumHashes == 0 {
+		return nil, ErrInvalid
+ }
+
+ _, err := intconv.Uint32ToInt(settings.NumHashes)
+	if err != nil {
+		return nil, ErrInvalid
+ }
+
+ return settings, nil
+}
--- /dev/null
+++ b/commitgraph/constants.go
@@ -1,0 +1,32 @@
+package commitgraph
+
+const (
+ FileSignature = 0x43475048 // "CGPH"
+ FileVersion = 1
+)
+
+const (
+ ChunkOIDF = 0x4f494446 // "OIDF"
+ ChunkOIDL = 0x4f49444c // "OIDL"
+ ChunkCDAT = 0x43444154 // "CDAT"
+ ChunkGDA2 = 0x47444132 // "GDA2"
+ ChunkGDO2 = 0x47444f32 // "GDO2"
+ ChunkEDGE = 0x45444745 // "EDGE"
+ ChunkBIDX = 0x42494458 // "BIDX"
+ ChunkBDAT = 0x42444154 // "BDAT"
+ ChunkBASE = 0x42415345 // "BASE"
+)
+
+const (
+ HeaderSize = 8
+ ChunkEntrySize = 12
+ FanoutSize = 256 * 4
+)
+
+const (
+ ParentNone = 0x70000000
+ ParentExtraMask = 0x80000000
+ ParentLastMask = 0x7fffffff
+
+ GenerationOverflow = 0x80000000
+)
--- /dev/null
+++ b/commitgraph/doc.go
@@ -1,0 +1,2 @@
+// Package commitgraph provides constants and common utilities for handling commit graphs.
+package commitgraph
--- /dev/null
+++ b/commitgraph/read/bloom.go
@@ -1,0 +1,114 @@
+package read
+
+import (
+ "encoding/binary"
+
+ "codeberg.org/lindenii/furgit/commitgraph/bloom"
+ "codeberg.org/lindenii/furgit/internal/intconv"
+)
+
+// HasBloom reports whether any layer has changed-path Bloom data.
+func (reader *Reader) HasBloom() bool {
+	for i := range reader.layers {
+		layer := &reader.layers[i]
+		if layer.chunkBloomIndex != nil && layer.chunkBloomData != nil && layer.bloomSettings != nil {
+			return true
+ }
+ }
+
+ return false
+}
+
+// BloomVersion returns the changed-path Bloom hash version, or 0 if absent.
+func (reader *Reader) BloomVersion() uint8 {
+	for i := len(reader.layers) - 1; i >= 0; i-- {
+		layer := &reader.layers[i]
+		if layer.bloomSettings != nil {
+			version, err := intconv.Uint32ToUint8(layer.bloomSettings.HashVersion)
+			if err != nil {
+				return 0
+ }
+
+ return version
+ }
+ }
+
+ return 0
+}
+
+// BloomFilterAt returns one commit's changed-path Bloom filter.
+//
+// Returns BloomUnavailableError when this commit graph has no Bloom data.
+func (reader *Reader) BloomFilterAt(pos Position) (*bloom.Filter, error) {
+	layer, err := reader.layerByPosition(pos)
+	if err != nil {
+		return nil, err
+ }
+
+	if layer.chunkBloomIndex == nil || layer.chunkBloomData == nil || layer.bloomSettings == nil {
+		return nil, &BloomUnavailableError{Pos: pos}
+	}
+
+ start, end, err := bloomRange(layer, pos.Index)
+	if err != nil {
+		return nil, err
+ }
+
+ filter := bloom.NewFilter(
+ layer.chunkBloomData[bloom.DataHeaderSize+start:bloom.DataHeaderSize+end],
+ *layer.bloomSettings,
+ )
+
+ return filter, nil
+}
+
+func bloomRange(layer *layer, commitIndex uint32) (int, int, error) {
+	off64 := uint64(commitIndex) * 4
+
+ off, err := intconv.Uint64ToInt(off64)
+	if err != nil {
+		return 0, 0, err
+ }
+
+ end := binary.BigEndian.Uint32(layer.chunkBloomIndex[off : off+4])
+
+ var start uint32
+
+	if commitIndex > 0 {
+		prevOff64 := uint64(commitIndex-1) * 4
+
+ prevOff, err := intconv.Uint64ToInt(prevOff64)
+		if err != nil {
+			return 0, 0, err
+ }
+
+ start = binary.BigEndian.Uint32(layer.chunkBloomIndex[prevOff : prevOff+4])
+ }
+
+	if end < start {
+		return 0, 0, &MalformedError{Path: layer.path, Reason: "invalid BIDX range"}
+	}
+
+ bdatLen := len(layer.chunkBloomData) - bloom.DataHeaderSize
+
+ bdatLenU32, err := intconv.IntToUint32(bdatLen)
+	if err != nil {
+		return 0, 0, err
+ }
+
+	if end > bdatLenU32 {
+		return 0, 0, &MalformedError{Path: layer.path, Reason: "BIDX range out of BDAT bounds"}
+	}
+
+ startInt, err := intconv.Uint64ToInt(uint64(start))
+ if err != nil {+ return 0, 0, err
+ }
+
+ endInt, err := intconv.Uint64ToInt(uint64(end))
+ if err != nil {+ return 0, 0, err
+ }
+
+ return startInt, endInt, nil
+}
--- /dev/null
+++ b/commitgraph/read/close.go
@@ -1,0 +1,18 @@
+package read
+
+// Close releases all mapped commit-graph files.
+func (reader *Reader) Close() error {
+	var closeErr error
+
+	for i := len(reader.layers) - 1; i >= 0; i-- {
+		err := reader.layers[i].close()
+		if err != nil && closeErr == nil {
+			closeErr = err
+ }
+ }
+
+ reader.layers = nil
+ reader.total = 0
+
+ return closeErr
+}
--- /dev/null
+++ b/commitgraph/read/commitat.go
@@ -1,0 +1,85 @@
+package read
+
+import (
+ "encoding/binary"
+
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+// CommitAt returns decoded commit-graph metadata at one position.
+func (reader *Reader) CommitAt(pos Position) (Commit, error) {
+	layer, err := reader.layerByPosition(pos)
+	if err != nil {
+		return Commit{}, err
+	}
+
+ hashSize := reader.algo.Size()
+ stride := hashSize + 16
+
+ strideU64, err := intconv.IntToUint64(stride)
+	if err != nil {
+		return Commit{}, err
+	}
+
+ start64 := uint64(pos.Index) * strideU64
+ end64 := start64 + strideU64
+
+ start, err := intconv.Uint64ToInt(start64)
+	if err != nil {
+		return Commit{}, err
+	}
+
+ end, err := intconv.Uint64ToInt(end64)
+	if err != nil {
+		return Commit{}, err
+	}
+
+ record := layer.chunkCommit[start:end]
+
+ treeOID, err := objectid.FromBytes(reader.algo, record[:hashSize])
+	if err != nil {
+		return Commit{}, err
+	}
+
+ oid, err := reader.OIDAt(pos)
+	if err != nil {
+		return Commit{}, err
+	}
+
+ p1 := binary.BigEndian.Uint32(record[hashSize : hashSize+4])
+ p2 := binary.BigEndian.Uint32(record[hashSize+4 : hashSize+8])
+ genAndTimeHi := binary.BigEndian.Uint32(record[hashSize+8 : hashSize+12])
+ timeLow := binary.BigEndian.Uint32(record[hashSize+12 : hashSize+16])
+
+ timeHigh := uint64(genAndTimeHi & 0x3)
+ commitTimeU64 := (timeHigh << 32) | uint64(timeLow)
+
+ commitTime, err := intconv.Uint64ToInt64(commitTimeU64)
+	if err != nil {
+		return Commit{}, err
+	}
+
+ generationV1 := genAndTimeHi >> 2
+
+ generationV2, err := reader.readGenerationV2(layer, pos.Index, commitTimeU64)
+	if err != nil {
+		return Commit{}, err
+	}
+
+ parent1, parent2, extra, err := reader.decodeParents(layer, p1, p2)
+	if err != nil {
+		return Commit{}, err
+	}
+
+	return Commit{
+		OID:            oid,
+ TreeOID: treeOID,
+ Parent1: parent1,
+ Parent2: parent2,
+ ExtraParents: extra,
+ CommitTimeUnix: commitTime,
+ GenerationV1: generationV1,
+ GenerationV2: generationV2,
+ }, nil
+}
--- /dev/null
+++ b/commitgraph/read/commits.go
@@ -1,0 +1,20 @@
+package read
+
+import "codeberg.org/lindenii/furgit/objectid"
+
+// Commit stores decoded commit-graph record data.
+type Commit struct {
+	OID            objectid.ObjectID
+ TreeOID objectid.ObjectID
+ Parent1 ParentRef
+ Parent2 ParentRef
+ ExtraParents []Position
+ CommitTimeUnix int64
+ GenerationV1 uint32
+ GenerationV2 uint64
+}
+
+// NumCommits returns total commits across loaded layers.
+func (reader *Reader) NumCommits() uint32 {
+	return reader.total
+}
--- /dev/null
+++ b/commitgraph/read/doc.go
@@ -1,0 +1,2 @@
+// Package read provides routines for reading commit graphs.
+package read
--- /dev/null
+++ b/commitgraph/read/edges.go
@@ -1,0 +1,48 @@
+package read
+
+import (
+ "encoding/binary"
+
+ "codeberg.org/lindenii/furgit/commitgraph"
+ "codeberg.org/lindenii/furgit/internal/intconv"
+)
+
+func (reader *Reader) decodeExtraEdgeList(layer *layer, edgeStart uint32) ([]Position, error) {
+	if len(layer.chunkExtraEdges) == 0 {
+		return nil, &MalformedError{Path: layer.path, Reason: "missing EDGE chunk"}
+	}
+
+ out := make([]Position, 0)
+
+ cur := edgeStart
+	for {
+		off64 := uint64(cur) * 4
+
+ off, err := intconv.Uint64ToInt(off64)
+		if err != nil {
+			return nil, err
+ }
+
+		if off+4 > len(layer.chunkExtraEdges) {
+			return nil, &MalformedError{Path: layer.path, Reason: "EDGE index out of range"}
+		}
+
+ word := binary.BigEndian.Uint32(layer.chunkExtraEdges[off : off+4])
+ parentGlobal := word & commitgraph.ParentLastMask
+
+ parentPos, err := reader.globalToPosition(parentGlobal)
+		if err != nil {
+			return nil, err
+ }
+
+ out = append(out, parentPos)
+
+		if word&commitgraph.ParentExtraMask != 0 {
+			break
+ }
+
+ cur++
+ }
+
+ return out, nil
+}
--- /dev/null
+++ b/commitgraph/read/errors.go
@@ -1,0 +1,58 @@
+package read
+
+import (
+ "fmt"
+
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+// NotFoundError reports a missing commit graph entry by object ID.
+type NotFoundError struct {
+	OID objectid.ObjectID
+}
+
+// Error implements error.
+func (err *NotFoundError) Error() string {
+	return fmt.Sprintf("commitgraph: object not found: %s", err.OID)
+}
+
+// PositionOutOfRangeError reports an invalid graph position.
+type PositionOutOfRangeError struct {
+	Pos Position
+}
+
+// Error implements error.
+func (err *PositionOutOfRangeError) Error() string {
+	return fmt.Sprintf("commitgraph: position out of range: graph=%d index=%d", err.Pos.Graph, err.Pos.Index)
+}
+
+// MalformedError reports malformed commit-graph data.
+type MalformedError struct {
+	Path   string
+ Reason string
+}
+
+// Error implements error.
+func (err *MalformedError) Error() string {
+	return fmt.Sprintf("commitgraph: malformed %q: %s", err.Path, err.Reason)
+}
+
+// UnsupportedVersionError reports unsupported commit-graph version.
+type UnsupportedVersionError struct {
+	Version uint8
+}
+
+// Error implements error.
+func (err *UnsupportedVersionError) Error() string {
+	return fmt.Sprintf("commitgraph: unsupported version %d", err.Version)
+}
+
+// BloomUnavailableError reports missing changed-path bloom data at one position.
+type BloomUnavailableError struct {
+	Pos Position
+}
+
+// Error implements error.
+func (err *BloomUnavailableError) Error() string {
+	return fmt.Sprintf("commitgraph: bloom unavailable at position graph=%d index=%d", err.Pos.Graph, err.Pos.Index)
+}
--- /dev/null
+++ b/commitgraph/read/generation.go
@@ -1,0 +1,43 @@
+package read
+
+import (
+ "encoding/binary"
+
+ "codeberg.org/lindenii/furgit/commitgraph"
+ "codeberg.org/lindenii/furgit/internal/intconv"
+)
+
+func (reader *Reader) readGenerationV2(layer *layer, index uint32, commitTime uint64) (uint64, error) {
+	if len(layer.chunkGeneration) == 0 {
+		return 0, nil
+ }
+
+ off64 := uint64(index) * 4
+
+ off, err := intconv.Uint64ToInt(off64)
+	if err != nil {
+		return 0, err
+ }
+
+ value := binary.BigEndian.Uint32(layer.chunkGeneration[off : off+4])
+
+	if value&commitgraph.GenerationOverflow == 0 {
+		return commitTime + uint64(value), nil
+ }
+
+ gdo2Index := value ^ commitgraph.GenerationOverflow
+ gdo2Off64 := uint64(gdo2Index) * 8
+
+ gdo2Off, err := intconv.Uint64ToInt(gdo2Off64)
+	if err != nil {
+		return 0, err
+ }
+
+	if gdo2Off+8 > len(layer.chunkGenerationOv) {
+		return 0, &MalformedError{Path: layer.path, Reason: "GDO2 index out of range"}
+	}
+
+ overflow := binary.BigEndian.Uint64(layer.chunkGenerationOv[gdo2Off : gdo2Off+8])
+
+ return commitTime + overflow, nil
+}
--- /dev/null
+++ b/commitgraph/read/hash.go
@@ -1,0 +1,79 @@
+package read
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+// HashVersion returns the commit-graph hash version.
+func (reader *Reader) HashVersion() uint8 {
+	return reader.hashVersion
+}
+
+func validateChainBaseHashes(algo objectid.Algorithm, chain []string, idx int, graph *layer) error {
+	if idx == 0 {
+		if len(graph.chunkBaseGraphs) != 0 {
+			return &MalformedError{Path: graph.path, Reason: "unexpected BASE chunk in first graph"}
+		}
+
+ return nil
+ }
+
+ hashSize := algo.Size()
+
+ expectedLen := idx * hashSize
+ if len(graph.chunkBaseGraphs) != expectedLen {+ return &MalformedError{+ Path: graph.path,
+ Reason: fmt.Sprintf("BASE chunk length %d does not match expected %d", len(graph.chunkBaseGraphs), expectedLen),+ }
+ }
+
+	for i := range idx {
+		start := i * hashSize
+ end := start + hashSize
+
+ baseHash, err := objectid.FromBytes(algo, graph.chunkBaseGraphs[start:end])
+		if err != nil {
+			return err
+ }
+
+		if baseHash.String() != chain[i] {
+			return &MalformedError{
+				Path:   graph.path,
+				Reason: fmt.Sprintf("BASE chunk mismatch at index %d", i),
+			}
+ }
+ }
+
+ return nil
+}
+
+func verifyTrailerHash(data []byte, algo objectid.Algorithm, path string) error {
+	hashSize := algo.Size()
+	if len(data) < hashSize {
+		return &MalformedError{Path: path, Reason: "file too short for trailer"}
+	}
+
+ hashImpl, err := algo.New()
+	if err != nil {
+		return err
+ }
+
+ _, err = io.Copy(hashImpl, bytes.NewReader(data[:len(data)-hashSize]))
+	if err != nil {
+		return err
+ }
+
+ got := hashImpl.Sum(nil)
+
+ want := data[len(data)-hashSize:]
+ if !bytes.Equal(got, want) {+ return &MalformedError{Path: path, Reason: "trailer hash mismatch"}+ }
+
+ return nil
+}
--- /dev/null
+++ b/commitgraph/read/iterators.go
@@ -1,0 +1,45 @@
+package read
+
+import (
+ "iter"
+
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+// AllPositions iterates all commit positions in native layer order.
+func (reader *Reader) AllPositions() iter.Seq[Position] {
+	return func(yield func(Position) bool) {
+		for layerIdx := range reader.layers {
+			layer := &reader.layers[layerIdx]
+
+ graph, err := intconv.IntToUint32(layerIdx)
+			if err != nil {
+				return
+ }
+
+			for idx := range layer.numCommits {
+				if !yield(Position{Graph: graph, Index: idx}) {
+					return
+ }
+ }
+ }
+ }
+}
+
+// AllOIDs iterates all commit object IDs in native layer order.
+func (reader *Reader) AllOIDs() iter.Seq[objectid.ObjectID] {
+	return func(yield func(objectid.ObjectID) bool) {
+		positions := reader.AllPositions()
+		for pos := range positions {
+			oid, err := reader.OIDAt(pos)
+			if err != nil {
+				return
+ }
+
+			if !yield(oid) {
+				return
+ }
+ }
+ }
+}
--- /dev/null
+++ b/commitgraph/read/layer.go
@@ -1,0 +1,28 @@
+package read
+
+import (
+ "os"
+
+ "codeberg.org/lindenii/furgit/commitgraph/bloom"
+)
+
+type layer struct {
+	path       string
+ file *os.File
+ data []byte
+ numCommits uint32
+ baseCount uint32
+ globalFrom uint32
+
+ chunkOIDFanout []byte
+ chunkOIDLookup []byte
+ chunkCommit []byte
+ chunkGeneration []byte
+ chunkGenerationOv []byte
+ chunkExtraEdges []byte
+ chunkBloomIndex []byte
+ chunkBloomData []byte
+ chunkBaseGraphs []byte
+
+ bloomSettings *bloom.Settings
+}
--- /dev/null
+++ b/commitgraph/read/layer_close.go
@@ -1,0 +1,33 @@
+package read
+
+import "syscall"
+
+func closeLayers(layers []layer) {
+	for i := len(layers) - 1; i >= 0; i-- {
+		_ = layers[i].close()
+ }
+}
+
+func (layer *layer) close() error {
+	var closeErr error
+
+	if layer.data != nil {
+		err := syscall.Munmap(layer.data)
+		if err != nil {
+			closeErr = err
+ }
+
+ layer.data = nil
+ }
+
+	if layer.file != nil {
+		err := layer.file.Close()
+		if err != nil && closeErr == nil {
+			closeErr = err
+ }
+
+ layer.file = nil
+ }
+
+ return closeErr
+}
--- /dev/null
+++ b/commitgraph/read/layer_lookup.go
@@ -1,0 +1,53 @@
+package read
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+func layerLookup(layer *layer, oid objectid.ObjectID) (uint32, bool) {
+	hashSize := oid.Size()
+ first := int(oid.RawBytes()[0])
+
+ var lo uint32
+	if first > 0 {
+		lo = binary.BigEndian.Uint32(layer.chunkOIDFanout[(first-1)*4 : first*4])
+ }
+
+ hi := binary.BigEndian.Uint32(layer.chunkOIDFanout[first*4 : (first+1)*4])
+ if hi == 0 || lo >= hi {+ return 0, false
+ }
+
+ target := oid.RawBytes()
+ left := int(lo)
+
+ right := int(hi) - 1
+ for left <= right {+ mid := left + (right-left)/2
+ start := mid * hashSize
+ end := start + hashSize
+
+ current := layer.chunkOIDLookup[start:end]
+
+ cmp := bytes.Compare(current, target)
+ switch {+ case cmp == 0:
+ pos, err := intconv.IntToUint32(mid)
+			if err != nil {
+				return 0, false
+ }
+
+ return pos, true
+ case cmp < 0:
+ left = mid + 1
+ default:
+ right = mid - 1
+ }
+ }
+
+ return 0, false
+}
--- /dev/null
+++ b/commitgraph/read/layer_open.go
@@ -1,0 +1,81 @@
+package read
+
+import (
+ "os"
+ "syscall"
+
+ "codeberg.org/lindenii/furgit/commitgraph"
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+func openLayer(root *os.Root, relPath string, algo objectid.Algorithm) (*layer, error) {
+	file, err := root.Open(relPath)
+	if err != nil {
+		return nil, err
+ }
+
+ info, err := file.Stat()
+	if err != nil {
+		_ = file.Close()
+
+ return nil, err
+ }
+
+ size := info.Size()
+	if size < int64(commitgraph.HeaderSize+commitgraph.FanoutSize+algo.Size()) {
+		_ = file.Close()
+
+		return nil, &MalformedError{Path: relPath, Reason: "file too short"}
+	}
+
+ mapLen, err := intconv.Int64ToUint64(size)
+	if err != nil {
+		_ = file.Close()
+
+ return nil, err
+ }
+
+ mapLenInt, err := intconv.Uint64ToInt(mapLen)
+	if err != nil {
+		_ = file.Close()
+
+ return nil, err
+ }
+
+ fd, err := intconv.UintptrToInt(file.Fd())
+	if err != nil {
+		_ = file.Close()
+
+ return nil, err
+ }
+
+ data, err := syscall.Mmap(fd, 0, mapLenInt, syscall.PROT_READ, syscall.MAP_PRIVATE)
+	if err != nil {
+		_ = file.Close()
+
+ return nil, err
+ }
+
+	out := &layer{
+		path: relPath,
+ file: file,
+ data: data,
+ }
+
+ parseErr := parseLayer(out, algo)
+ if parseErr != nil {+ _ = out.close()
+
+ return nil, parseErr
+ }
+
+ verifyErr := verifyTrailerHash(out.data, algo, relPath)
+ if verifyErr != nil {+ _ = out.close()
+
+ return nil, verifyErr
+ }
+
+ return out, nil
+}
--- /dev/null
+++ b/commitgraph/read/layer_parse.go
@@ -1,0 +1,276 @@
+package read
+
+import (
+ "encoding/binary"
+
+ "codeberg.org/lindenii/furgit/commitgraph"
+ "codeberg.org/lindenii/furgit/commitgraph/bloom"
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+func parseLayer(layer *layer, algo objectid.Algorithm) error { //nolint:maintidx
+	if len(layer.data) < commitgraph.HeaderSize {
+		return &MalformedError{Path: layer.path, Reason: "file too short"}
+	}
+
+ header := layer.data[:commitgraph.HeaderSize]
+
+ signature := binary.BigEndian.Uint32(header[:4])
+ if signature != commitgraph.FileSignature {+ return &MalformedError{Path: layer.path, Reason: "invalid signature"}+ }
+
+ version := header[4]
+ if version != commitgraph.FileVersion {+ return &UnsupportedVersionError{Version: version}+ }
+
+ expectedHashVersion, err := intconv.Uint32ToUint8(algo.PackHashID())
+	if err != nil {
+		return err
+ }
+
+ hashVersion := header[5]
+ if hashVersion != expectedHashVersion {+ return &MalformedError{Path: layer.path, Reason: "hash version does not match object format"}+ }
+
+ numChunks := int(header[6])
+ baseCount := uint32(header[7])
+
+ tocLen := (numChunks + 1) * commitgraph.ChunkEntrySize
+ tocStart := commitgraph.HeaderSize
+
+ tocEnd := tocStart + tocLen
+ if tocEnd > len(layer.data) {+ return &MalformedError{Path: layer.path, Reason: "truncated chunk table"}+ }
+
+	type tocEntry struct {
+		id     uint32
+ offset uint64
+ }
+
+ entries := make([]tocEntry, 0, numChunks+1)
+	for i := range numChunks + 1 {
+		entryOff := tocStart + i*commitgraph.ChunkEntrySize
+ entryData := layer.data[entryOff : entryOff+commitgraph.ChunkEntrySize]
+
+		entry := tocEntry{
+			id:     binary.BigEndian.Uint32(entryData[:4]),
+ offset: binary.BigEndian.Uint64(entryData[4:]),
+ }
+ entries = append(entries, entry)
+ }
+
+	if entries[len(entries)-1].id != 0 {
+		return &MalformedError{Path: layer.path, Reason: "missing chunk table terminator"}
+	}
+
+ trailerStart := len(layer.data) - algo.Size()
+
+ chunks := make(map[uint32][]byte, numChunks)
+ for i := range numChunks {+ entry := entries[i]
+ if entry.id == 0 {+ return &MalformedError{Path: layer.path, Reason: "early chunk table terminator"}+ }
+
+ next := entries[i+1]
+
+ start, err := intconv.Uint64ToInt(entry.offset)
+		if err != nil {
+			return err
+ }
+
+ end, err := intconv.Uint64ToInt(next.offset)
+		if err != nil {
+			return err
+ }
+
+		if start < tocEnd || end < start || end > trailerStart {
+			return &MalformedError{Path: layer.path, Reason: "invalid chunk offsets"}
+		}
+
+		if _, exists := chunks[entry.id]; exists {
+			return &MalformedError{Path: layer.path, Reason: "duplicate chunk id"}
+		}
+
+ chunks[entry.id] = layer.data[start:end]
+ }
+
+ oidf := chunks[commitgraph.ChunkOIDF]
+	if len(oidf) != commitgraph.FanoutSize {
+		return &MalformedError{Path: layer.path, Reason: "invalid OIDF length"}
+	}
+
+ layer.chunkOIDFanout = oidf
+ layer.numCommits = binary.BigEndian.Uint32(oidf[commitgraph.FanoutSize-4:])
+
+	for i := range 255 {
+		cur := binary.BigEndian.Uint32(oidf[i*4 : (i+1)*4])
+
+ next := binary.BigEndian.Uint32(oidf[(i+1)*4 : (i+2)*4])
+ if cur > next {+ return &MalformedError{Path: layer.path, Reason: "non-monotonic OIDF fanout"}+ }
+ }
+
+ hashSize := algo.Size()
+
+ hashSizeU64, err := intconv.IntToUint64(hashSize)
+	if err != nil {
+		return err
+ }
+
+ oidl := chunks[commitgraph.ChunkOIDL]
+ oidlWantLen64 := uint64(layer.numCommits) * hashSizeU64
+
+ oidlWantLen, err := intconv.Uint64ToInt(oidlWantLen64)
+	if err != nil {
+		return err
+ }
+
+	if len(oidl) != oidlWantLen {
+		return &MalformedError{Path: layer.path, Reason: "invalid OIDL length"}
+	}
+
+ layer.chunkOIDLookup = oidl
+
+ stride := hashSize + 16
+
+ strideU64, err := intconv.IntToUint64(stride)
+	if err != nil {
+		return err
+ }
+
+ cdat := chunks[commitgraph.ChunkCDAT]
+ cdatWantLen64 := uint64(layer.numCommits) * strideU64
+
+ cdatWantLen, err := intconv.Uint64ToInt(cdatWantLen64)
+	if err != nil {
+		return err
+ }
+
+	if len(cdat) != cdatWantLen {
+		return &MalformedError{Path: layer.path, Reason: "invalid CDAT length"}
+	}
+
+ layer.chunkCommit = cdat
+
+ gda2 := chunks[commitgraph.ChunkGDA2]
+	if len(gda2) != 0 {
+		wantLen64 := uint64(layer.numCommits) * 4
+
+ wantLen, err := intconv.Uint64ToInt(wantLen64)
+		if err != nil {
+			return err
+ }
+
+		if len(gda2) != wantLen {
+			return &MalformedError{Path: layer.path, Reason: "invalid GDA2 length"}
+		}
+
+ layer.chunkGeneration = gda2
+ }
+
+ gdo2 := chunks[commitgraph.ChunkGDO2]
+	if len(gdo2) != 0 {
+		if len(gdo2)%8 != 0 {
+			return &MalformedError{Path: layer.path, Reason: "invalid GDO2 length"}
+		}
+
+ layer.chunkGenerationOv = gdo2
+ }
+
+ edge := chunks[commitgraph.ChunkEDGE]
+	if len(edge) != 0 {
+		if len(edge)%4 != 0 {
+			return &MalformedError{Path: layer.path, Reason: "invalid EDGE length"}
+		}
+
+ layer.chunkExtraEdges = edge
+ }
+
+ base := chunks[commitgraph.ChunkBASE]
+	if baseCount == 0 {
+		if len(base) != 0 {
+			return &MalformedError{Path: layer.path, Reason: "unexpected BASE chunk"}
+		}
+	} else {
+		wantLen64 := uint64(baseCount) * hashSizeU64
+
+ wantLen, err := intconv.Uint64ToInt(wantLen64)
+		if err != nil {
+			return err
+ }
+
+		if len(base) != wantLen {
+			return &MalformedError{Path: layer.path, Reason: "invalid BASE length"}
+		}
+
+ layer.chunkBaseGraphs = base
+ }
+
+ layer.baseCount = baseCount
+
+ bidx := chunks[commitgraph.ChunkBIDX]
+
+ bdat := chunks[commitgraph.ChunkBDAT]
+ if len(bidx) != 0 || len(bdat) != 0 { //nolint:nestif+ if len(bidx) == 0 || len(bdat) == 0 {+ return &MalformedError{Path: layer.path, Reason: "BIDX/BDAT must both be present"}+ }
+
+ bidxWantLen64 := uint64(layer.numCommits) * 4
+
+ bidxWantLen, err := intconv.Uint64ToInt(bidxWantLen64)
+		if err != nil {
+			return err
+ }
+
+		if len(bidx) != bidxWantLen {
+			return &MalformedError{Path: layer.path, Reason: "invalid BIDX length"}
+		}
+
+		if len(bdat) < bloom.DataHeaderSize {
+			return &MalformedError{Path: layer.path, Reason: "invalid BDAT length"}
+		}
+
+ settings, err := bloom.ParseSettings(bdat)
+		if err != nil {
+			return err
+ }
+
+ prev := uint32(0)
+
+		for i := range layer.numCommits {
+			off := int(i) * 4
+
+ cur := binary.BigEndian.Uint32(bidx[off : off+4])
+ if i > 0 && cur < prev {+ return &MalformedError{Path: layer.path, Reason: "non-monotonic BIDX"}+ }
+
+ bdatDataLen := len(bdat) - bloom.DataHeaderSize
+
+ bdatDataLenU32, err := intconv.IntToUint32(bdatDataLen)
+			if err != nil {
+				return err
+ }
+
+			if cur > bdatDataLenU32 {
+				return &MalformedError{Path: layer.path, Reason: "BIDX offset out of range"}
+			}
+
+ prev = cur
+ }
+
+ layer.chunkBloomIndex = bidx
+ layer.chunkBloomData = bdat
+ layer.bloomSettings = settings
+ }
+
+ return nil
+}
--- /dev/null
+++ b/commitgraph/read/layer_pos.go
@@ -1,0 +1,21 @@
+package read
+
+import "codeberg.org/lindenii/furgit/internal/intconv"
+
+func (reader *Reader) layerByPosition(pos Position) (*layer, error) {+ graphIdx, err := intconv.Uint64ToInt(uint64(pos.Graph))
+ if err != nil {+ return nil, err
+ }
+
+ if graphIdx < 0 || graphIdx >= len(reader.layers) {+ return nil, &PositionOutOfRangeError{Pos: pos}+ }
+
+ layer := &reader.layers[graphIdx]
+ if pos.Index >= layer.numCommits {+ return nil, &PositionOutOfRangeError{Pos: pos}+ }
+
+ return layer, nil
+}
--- /dev/null
+++ b/commitgraph/read/layerinfo.go
@@ -1,0 +1,23 @@
+package read
+
+// LayerInfo describes one loaded commit-graph layer.
+type LayerInfo struct {+ Path string
+ BaseCount uint32
+ Commits uint32
+}
+
+// Layers returns loaded layer metadata in native chain order.
+func (reader *Reader) Layers() []LayerInfo {+ out := make([]LayerInfo, 0, len(reader.layers))
+ for i := range reader.layers {+ layer := reader.layers[i]
+ out = append(out, LayerInfo{+ Path: layer.path,
+ BaseCount: layer.baseCount,
+ Commits: layer.numCommits,
+ })
+ }
+
+ return out
+}
--- /dev/null
+++ b/commitgraph/read/lookup.go
@@ -1,0 +1,29 @@
+package read
+
+import (
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+// Lookup resolves one object ID to one graph position.
+func (reader *Reader) Lookup(oid objectid.ObjectID) (Position, error) {+ if oid.Algorithm() != reader.algo {+ return Position{}, &NotFoundError{OID: oid}+ }
+
+ for layerIdx := len(reader.layers) - 1; layerIdx >= 0; layerIdx-- {+ layer := &reader.layers[layerIdx]
+
+ found, ok := layerLookup(layer, oid)
+ if ok {+ idxU32, err := intconv.IntToUint32(layerIdx)
+ if err != nil {+ return Position{}, err+ }
+
+ return Position{Graph: idxU32, Index: found}, nil+ }
+ }
+
+ return Position{}, &NotFoundError{OID: oid}+}
--- /dev/null
+++ b/commitgraph/read/mode.go
@@ -1,0 +1,11 @@
+package read
+
// OpenMode controls which commit-graph layout Open loads.
//
// Pass one of the constants below to Open; any other value is rejected.
type OpenMode uint8

const (
	// OpenSingle opens one commit-graph file at info/commit-graph.
	OpenSingle OpenMode = iota
	// OpenChain opens chained commit-graphs from info/commit-graphs
	// (a commit-graph-chain file plus one graph file per chain entry).
	OpenChain
)
--- /dev/null
+++ b/commitgraph/read/oidat.go
@@ -1,0 +1,36 @@
+package read
+
+import (
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+// OIDAt returns object ID at one position.
+func (reader *Reader) OIDAt(pos Position) (objectid.ObjectID, error) {+ layer, err := reader.layerByPosition(pos)
+ if err != nil {+ return objectid.ObjectID{}, err+ }
+
+ hashSize := reader.algo.Size()
+
+ hashSizeU64, err := intconv.IntToUint64(hashSize)
+ if err != nil {+ return objectid.ObjectID{}, err+ }
+
+ start64 := uint64(pos.Index) * hashSizeU64
+ end64 := start64 + hashSizeU64
+
+ start, err := intconv.Uint64ToInt(start64)
+ if err != nil {+ return objectid.ObjectID{}, err+ }
+
+ end, err := intconv.Uint64ToInt(end64)
+ if err != nil {+ return objectid.ObjectID{}, err+ }
+
+ return objectid.FromBytes(reader.algo, layer.chunkOIDLookup[start:end])
+}
--- /dev/null
+++ b/commitgraph/read/open.go
@@ -1,0 +1,24 @@
+package read
+
+import (
+ "fmt"
+ "os"
+
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+// Open opens commit-graph data from one objects root.
+func Open(root *os.Root, algo objectid.Algorithm, mode OpenMode) (*Reader, error) {+ if algo.Size() == 0 {+ return nil, objectid.ErrInvalidAlgorithm
+ }
+
+ switch mode {+ case OpenSingle:
+ return openSingle(root, algo)
+ case OpenChain:
+ return openChain(root, algo)
+ default:
+ return nil, fmt.Errorf("commitgraph: invalid open mode %d", mode)+ }
+}
--- /dev/null
+++ b/commitgraph/read/open_chain.go
@@ -1,0 +1,133 @@
+package read
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+func openChain(root *os.Root, algo objectid.Algorithm) (*Reader, error) {+ chainPath := "info/commit-graphs/commit-graph-chain"
+
+ file, err := root.Open(chainPath)
+ if err != nil {+ if errors.Is(err, os.ErrNotExist) {+ return nil, &MalformedError{Path: chainPath, Reason: "missing commit-graph-chain"}+ }
+
+ return nil, err
+ }
+
+ scanner := bufio.NewScanner(file)
+ hashes := make([]string, 0)
+
+ for scanner.Scan() {+ line := strings.TrimSpace(scanner.Text())
+ if line == "" {+ continue
+ }
+
+ hashes = append(hashes, line)
+ }
+
+ scanErr := scanner.Err()
+ closeErr := file.Close()
+
+ if scanErr != nil {+ return nil, scanErr
+ }
+
+ if closeErr != nil {+ return nil, closeErr
+ }
+
+ if len(hashes) == 0 {+ return nil, &MalformedError{Path: chainPath, Reason: "empty chain"}+ }
+
+ layers := make([]layer, 0, len(hashes))
+
+ var total uint32
+
+ hashVersion, err := intconv.Uint32ToUint8(algo.PackHashID())
+ if err != nil {+ return nil, err
+ }
+
+ for i, hashHex := range hashes {+ expectedBaseCount, err := intconv.IntToUint32(i)
+ if err != nil {+ closeLayers(layers)
+
+ return nil, err
+ }
+
+ if len(hashHex) != algo.HexLen() {+ closeLayers(layers)
+
+ return nil, &MalformedError{+ Path: chainPath,
+ Reason: fmt.Sprintf("invalid graph hash length at line %d", i+1),+ }
+ }
+
+ relPath := fmt.Sprintf("info/commit-graphs/graph-%s.graph", hashHex)+
+ loaded, loadErr := openLayer(root, relPath, algo)
+ if loadErr != nil {+ closeLayers(layers)
+
+ return nil, loadErr
+ }
+
+ if loaded.baseCount != expectedBaseCount {+ _ = loaded.close()
+
+ closeLayers(layers)
+
+ return nil, &MalformedError{+ Path: relPath,
+ Reason: fmt.Sprintf("BASE count %d does not match chain depth %d", loaded.baseCount, i),+ }
+ }
+
+ validateErr := validateChainBaseHashes(algo, hashes, i, loaded)
+ if validateErr != nil {+ _ = loaded.close()
+
+ closeLayers(layers)
+
+ return nil, validateErr
+ }
+
+ loaded.globalFrom = total
+ loaded.baseCount = expectedBaseCount
+
+ totalNext := total + loaded.numCommits
+ if totalNext < total {+ _ = loaded.close()
+
+ closeLayers(layers)
+
+ return nil, &MalformedError{Path: relPath, Reason: "total commit count overflow"}+ }
+
+ total = totalNext
+
+ layers = append(layers, *loaded)
+ }
+
+ out := &Reader{+ algo: algo,
+ hashVersion: hashVersion,
+ layers: layers,
+ total: total,
+ }
+
+ return out, nil
+}
--- /dev/null
+++ b/commitgraph/read/open_single.go
@@ -1,0 +1,32 @@
+package read
+
+import (
+ "os"
+
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+func openSingle(root *os.Root, algo objectid.Algorithm) (*Reader, error) {+ graph, err := openLayer(root, "info/commit-graph", algo)
+ if err != nil {+ return nil, err
+ }
+
+ graph.baseCount = 0
+ graph.globalFrom = 0
+
+ hashVersion, err := intconv.Uint32ToUint8(algo.PackHashID())
+ if err != nil {+ return nil, err
+ }
+
+ out := &Reader{+ algo: algo,
+ hashVersion: hashVersion,
+ layers: []layer{*graph},+ total: graph.numCommits,
+ }
+
+ return out, nil
+}
--- /dev/null
+++ b/commitgraph/read/parents.go
@@ -1,0 +1,67 @@
+package read
+
+import "codeberg.org/lindenii/furgit/commitgraph"
+
+// ParentRef references one parent position.
+type ParentRef struct {+ Valid bool
+ Pos Position
+}
+
+func (reader *Reader) decodeParents(layer *layer, p1, p2 uint32) (ParentRef, ParentRef, []Position, error) {+ parent1, err := reader.decodeSingleParent(p1)
+ if err != nil {+ return ParentRef{}, ParentRef{}, nil, err+ }
+
+ if p2 == commitgraph.ParentNone {+ return parent1, ParentRef{}, nil, nil+ }
+
+ if p2&commitgraph.ParentExtraMask == 0 {+ parent2, err := reader.decodeSingleParent(p2)
+ if err != nil {+ return ParentRef{}, ParentRef{}, nil, err+ }
+
+ return parent1, parent2, nil, nil
+ }
+
+ edgeStart := p2 & commitgraph.ParentLastMask
+
+ parents, err := reader.decodeExtraEdgeList(layer, edgeStart)
+ if err != nil {+ return ParentRef{}, ParentRef{}, nil, err+ }
+
+ if len(parents) == 0 {+ return ParentRef{}, ParentRef{}, nil, &MalformedError{Path: layer.path, Reason: "empty EDGE list"}+ }
+
+ parent2 := ParentRef{Valid: true, Pos: parents[0]}+ if len(parents) == 1 {+ return parent1, parent2, nil, nil
+ }
+
+ return parent1, parent2, parents[1:], nil
+}
+
+func (reader *Reader) decodeSingleParent(raw uint32) (ParentRef, error) {+ if raw == commitgraph.ParentNone {+ return ParentRef{}, nil+ }
+
+ if raw&commitgraph.ParentExtraMask != 0 {+ return ParentRef{}, &MalformedError{+ Path: "commit-graph",
+ Reason: "unexpected EDGE marker in single-parent slot",
+ }
+ }
+
+ pos, err := reader.globalToPosition(raw)
+ if err != nil {+ return ParentRef{}, err+ }
+
+ return ParentRef{Valid: true, Pos: pos}, nil+}
--- /dev/null
+++ b/commitgraph/read/position.go
@@ -1,0 +1,38 @@
+package read
+
+import (
+ "fmt"
+
+ "codeberg.org/lindenii/furgit/internal/intconv"
+)
+
+// Position identifies one commit record by layer and row index.
+type Position struct {+ Graph uint32
+ Index uint32
+}
+
+func (reader *Reader) globalToPosition(global uint32) (Position, error) {+ for i := range reader.layers {+ layer := &reader.layers[i]
+ from := layer.globalFrom
+
+ to := from + layer.numCommits
+ if global >= from && global < to {+ graph, err := intconv.IntToUint32(i)
+ if err != nil {+ return Position{}, err+ }
+
+ return Position{+ Graph: graph,
+ Index: global - from,
+ }, nil
+ }
+ }
+
+ return Position{}, &MalformedError{+ Path: "commit-graph",
+ Reason: fmt.Sprintf("parent global position out of range: %d", global),+ }
+}
--- /dev/null
+++ b/commitgraph/read/read_test.go
@@ -1,0 +1,322 @@
+package read_test
+
+import (
+ "errors"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "testing"
+
+ "codeberg.org/lindenii/furgit/commitgraph/bloom"
+ "codeberg.org/lindenii/furgit/commitgraph/read"
+ "codeberg.org/lindenii/furgit/internal/intconv"
+ "codeberg.org/lindenii/furgit/internal/testgit"
+ "codeberg.org/lindenii/furgit/objectid"
+)
+
+func fixtureRepoPath(t *testing.T, algo objectid.Algorithm, name string) string {+ t.Helper()
+
+ return filepath.Join("testdata", "fixtures", algo.String(), name, "repo.git")+}
+
+func fixtureRepo(t *testing.T, algo objectid.Algorithm, name string) *testgit.TestRepo {+ t.Helper()
+
+ return testgit.NewRepoFromFixture(t, algo, fixtureRepoPath(t, algo, name))
+}
+
+func TestReadSingleMatchesGit(t *testing.T) {+ t.Parallel()
+
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper+ testRepo := fixtureRepo(t, algo, "single_changed")
+
+ reader := openReader(t, testRepo, read.OpenSingle)
+
+ defer func() { _ = reader.Close() }()+
+ allIDs := testRepo.RevList(t, "--all")
+ if len(allIDs) == 0 {+ t.Fatal("git rev-list --all returned no commits")+ }
+
+ wantCommitCount, err := intconv.IntToUint32(len(allIDs))
+ if err != nil {+ t.Fatalf("len(allIDs) convert: %v", err)+ }
+
+ if got := reader.NumCommits(); got != wantCommitCount {+ t.Fatalf("NumCommits() = %d, want %d", got, len(allIDs))+ }
+
+ if !reader.HasBloom() {+ t.Fatal("HasBloom() = false, want true")+ }
+
+ bloomVersion := reader.BloomVersion()
+ if bloomVersion == 0 {+ t.Fatal("BloomVersion() = 0, want non-zero when HasBloom() is true")+ }
+
+ for _, id := range allIDs {+ pos, err := reader.Lookup(id)
+ if err != nil {+ t.Fatalf("Lookup(%s): %v", id, err)+ }
+
+ gotID, err := reader.OIDAt(pos)
+ if err != nil {+ t.Fatalf("OIDAt(%+v): %v", pos, err)+ }
+
+ if gotID != id {+ t.Fatalf("OIDAt(Lookup(%s)) = %s, want %s", id, gotID, id)+ }
+ }
+
+ step := max(len(allIDs)/24, 1)
+
+ for i, id := range allIDs {+ if i%step != 0 && i != len(allIDs)-1 {+ continue
+ }
+
+ verifyCommitAgainstGit(t, testRepo, reader, id)
+ }
+ })
+}
+
+func TestReadChainMatchesGit(t *testing.T) {+ t.Parallel()
+
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper+ testRepo := fixtureRepo(t, algo, "chain_changed")
+
+ reader := openReader(t, testRepo, read.OpenChain)
+
+ defer func() { _ = reader.Close() }()+
+ layers := reader.Layers()
+ if len(layers) < 2 {+ t.Fatalf("Layers len = %d, want >= 2", len(layers))+ }
+
+ allIDs := testRepo.RevList(t, "--all")
+
+ wantCommitCount, err := intconv.IntToUint32(len(allIDs))
+ if err != nil {+ t.Fatalf("len(allIDs) convert: %v", err)+ }
+
+ if got := reader.NumCommits(); got != wantCommitCount {+ t.Fatalf("NumCommits() = %d, want %d", got, len(allIDs))+ }
+
+ step := max(len(allIDs)/20, 1)
+
+ for i, id := range allIDs {+ pos, err := reader.Lookup(id)
+ if err != nil {+ t.Fatalf("Lookup(%s): %v", id, err)+ }
+
+ if i%step != 0 && i != len(allIDs)-1 {+ continue
+ }
+
+ gotID, err := reader.OIDAt(pos)
+ if err != nil {+ t.Fatalf("OIDAt(%+v): %v", pos, err)+ }
+
+ if gotID != id {+ t.Fatalf("OIDAt(Lookup(%s)) = %s, want %s", id, gotID, id)+ }
+ }
+ })
+}
+
+func TestBloomUnavailableWithoutChangedPaths(t *testing.T) {+ t.Parallel()
+
+ testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper+ testRepo := fixtureRepo(t, algo, "single_nochanged")
+
+ reader := openReader(t, testRepo, read.OpenSingle)
+
+ defer func() { _ = reader.Close() }()+
+ head := testRepo.RevParse(t, "HEAD")
+
+ pos, err := reader.Lookup(head)
+ if err != nil {+ t.Fatalf("Lookup(%s): %v", head, err)+ }
+
+ _, err = reader.BloomFilterAt(pos)
+ if err == nil {+ t.Fatal("BloomFilterAt() error = nil, want BloomUnavailableError")+ }
+
+ unavailable, ok := errors.AsType[*read.BloomUnavailableError](err)
+ if !ok {+ t.Fatalf("BloomFilterAt() error type = %T, want *BloomUnavailableError", err)+ }
+
+ if unavailable.Pos != pos {+ t.Fatalf("BloomUnavailableError.Pos = %+v, want %+v", unavailable.Pos, pos)+ }
+ })
+}
+
+func openReader(tb testing.TB, testRepo *testgit.TestRepo, mode read.OpenMode) *read.Reader {+ tb.Helper()
+
+ root := testRepo.OpenObjectsRoot(tb)
+
+ reader, err := read.Open(root, testRepo.Algorithm(), mode)
+ if err != nil {+ tb.Fatalf("read.Open(objects): %v", err)+ }
+
+ return reader
+}
+
+func verifyCommitAgainstGit(tb testing.TB, testRepo *testgit.TestRepo, reader *read.Reader, id objectid.ObjectID) {+ tb.Helper()
+
+ pos, err := reader.Lookup(id)
+ if err != nil {+ tb.Fatalf("Lookup(%s): %v", id, err)+ }
+
+ commit, err := reader.CommitAt(pos)
+ if err != nil {+ tb.Fatalf("CommitAt(%+v): %v", pos, err)+ }
+
+ if commit.OID != id {+ tb.Fatalf("CommitAt(%+v).OID = %s, want %s", pos, commit.OID, id)+ }
+
+ treeHex := testRepo.Run(tb, "show", "-s", "--format=%T", id.String())
+
+ wantTree, err := objectid.ParseHex(testRepo.Algorithm(), treeHex)
+ if err != nil {+ tb.Fatalf("parse tree id %q: %v", treeHex, err)+ }
+
+ if commit.TreeOID != wantTree {+ tb.Fatalf("CommitAt(%+v).TreeOID = %s, want %s", pos, commit.TreeOID, wantTree)+ }
+
+ wantParents := parseOIDLine(tb, testRepo.Algorithm(), testRepo.Run(tb, "show", "-s", "--format=%P", id.String()))
+
+ gotParents := commitParents(tb, reader, commit)
+ if len(gotParents) != len(wantParents) {+ tb.Fatalf("parent count for %s = %d, want %d", id, len(gotParents), len(wantParents))+ }
+
+ for i := range gotParents {+ if gotParents[i] != wantParents[i] {+ tb.Fatalf("parent %d for %s = %s, want %s", i, id, gotParents[i], wantParents[i])+ }
+ }
+
+ commitTimeRaw := testRepo.Run(tb, "show", "-s", "--format=%ct", id.String())
+
+ wantCommitTime, err := strconv.ParseInt(strings.TrimSpace(commitTimeRaw), 10, 64)
+ if err != nil {+ tb.Fatalf("parse commit time %q: %v", commitTimeRaw, err)+ }
+
+ if commit.CommitTimeUnix != wantCommitTime {+ tb.Fatalf("CommitAt(%+v).CommitTimeUnix = %d, want %d", pos, commit.CommitTimeUnix, wantCommitTime)+ }
+
+ filter, err := reader.BloomFilterAt(pos)
+ if err != nil {+ tb.Fatalf("BloomFilterAt(%+v): %v", pos, err)+ }
+
+ if filter.HashVersion != uint32(reader.BloomVersion()) {+ tb.Fatalf("filter.HashVersion = %d, want %d", filter.HashVersion, reader.BloomVersion())+ }
+
+ assertChangedPathsBloomPositive(tb, testRepo, filter, id)
+}
+
+func commitParents(tb testing.TB, reader *read.Reader, commit read.Commit) []objectid.ObjectID {+ tb.Helper()
+
+ out := make([]objectid.ObjectID, 0, 2+len(commit.ExtraParents))
+
+ if commit.Parent1.Valid {+ id, err := reader.OIDAt(commit.Parent1.Pos)
+ if err != nil {+ tb.Fatalf("OIDAt(parent1 %+v): %v", commit.Parent1.Pos, err)+ }
+
+ out = append(out, id)
+ }
+
+ if commit.Parent2.Valid {+ id, err := reader.OIDAt(commit.Parent2.Pos)
+ if err != nil {+ tb.Fatalf("OIDAt(parent2 %+v): %v", commit.Parent2.Pos, err)+ }
+
+ out = append(out, id)
+ }
+
+ for _, parentPos := range commit.ExtraParents {+ id, err := reader.OIDAt(parentPos)
+ if err != nil {+ tb.Fatalf("OIDAt(extra parent %+v): %v", parentPos, err)+ }
+
+ out = append(out, id)
+ }
+
+ return out
+}
+
+func assertChangedPathsBloomPositive(tb testing.TB, testRepo *testgit.TestRepo, filter *bloom.Filter, commitID objectid.ObjectID) {+ tb.Helper()
+
+ changedPaths := testRepo.Run(tb, "diff-tree", "--no-commit-id", "--name-only", "-r", "--root", commitID.String())
+ for line := range strings.SplitSeq(strings.TrimSpace(changedPaths), "\n") {+ path := strings.TrimSpace(line)
+ if path == "" {+ continue
+ }
+
+ mightContain, err := filter.MightContain([]byte(path))
+ if err != nil {+ tb.Fatalf("MightContain(%q): %v", path, err)+ }
+
+ if !mightContain {+ tb.Fatalf("Bloom filter false negative for commit %s path %q", commitID, path)+ }
+ }
+}
+
+func parseOIDLine(tb testing.TB, algo objectid.Algorithm, line string) []objectid.ObjectID {+ tb.Helper()
+
+ toks := strings.Fields(line)
+
+ out := make([]objectid.ObjectID, 0, len(toks))
+ for _, tok := range toks {+ id, err := objectid.ParseHex(algo, tok)
+ if err != nil {+ tb.Fatalf("parse object id %q: %v", tok, err)+ }
+
+ out = append(out, id)
+ }
+
+ return out
+}
--- /dev/null
+++ b/commitgraph/read/reader.go
@@ -1,0 +1,14 @@
+package read
+
+import "codeberg.org/lindenii/furgit/objectid"
+
+// Reader provides read-only access to one mmap-backed commit-graph snapshot.
+//
+// It is safe for concurrent read-only queries.
+type Reader struct {+ algo objectid.Algorithm
+ hashVersion uint8
+
+ layers []layer
+ total uint32
+}
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/HEAD
@@ -1,0 +1,1 @@
+ref: refs/heads/master
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/config
@@ -1,0 +1,4 @@
+[core]
+ repositoryformatversion = 0
+ filemode = true
+ bare = true
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/info/commit-graphs/commit-graph-chain
@@ -1,0 +1,2 @@
+dd7578d5216ca76c25b19631ba90f7498aeabbe7
+bf985c21612a52070d8b008e6ef51edf8b609401
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/info/commit-graphs/graph-bf985c21612a52070d8b008e6ef51edf8b609401.graph differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/info/commit-graphs/graph-dd7578d5216ca76c25b19631ba90f7498aeabbe7.graph differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/info/packs
@@ -1,0 +1,2 @@
+P pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.pack
+
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/pack/pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.bitmap differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/pack/pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.idx differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/pack/pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.pack differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/pack/pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.rev differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/refs/heads/master
@@ -1,0 +1,1 @@
+46ca641fd65e566b8ecfa567a1f01766289192f8
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/HEAD
@@ -1,0 +1,1 @@
+ref: refs/heads/main
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/config
@@ -1,0 +1,4 @@
+[core]
+ repositoryformatversion = 0
+ filemode = true
+ bare = true
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/info/commit-graph differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/info/packs
@@ -1,0 +1,2 @@
+P pack-34e9e132566989e2abfe8821731236c77f9bcbe9.pack
+
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/pack/pack-34e9e132566989e2abfe8821731236c77f9bcbe9.bitmap differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/pack/pack-34e9e132566989e2abfe8821731236c77f9bcbe9.idx differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/pack/pack-34e9e132566989e2abfe8821731236c77f9bcbe9.pack differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/pack/pack-34e9e132566989e2abfe8821731236c77f9bcbe9.rev differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/refs/heads/main
@@ -1,0 +1,1 @@
+d02a8dbd1a8fbaac8ab7f7f1533cc312ab2c9eec
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/HEAD
@@ -1,0 +1,1 @@
+ref: refs/heads/master
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/config
@@ -1,0 +1,4 @@
+[core]
+ repositoryformatversion = 0
+ filemode = true
+ bare = true
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/info/commit-graph differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/info/packs
@@ -1,0 +1,2 @@
+P pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.pack
+
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/pack/pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.bitmap differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/pack/pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.idx differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/pack/pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.pack differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/pack/pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.rev differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/refs/heads/master
@@ -1,0 +1,1 @@
+dda8217252bdf3e01fdf31309d0e5c3051b00945
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/HEAD
@@ -1,0 +1,1 @@
+ref: refs/heads/master
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/config
@@ -1,0 +1,6 @@
+[extensions]
+ objectformat = sha256
+[core]
+ repositoryformatversion = 1
+ filemode = true
+ bare = true
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/info/commit-graphs/commit-graph-chain
@@ -1,0 +1,2 @@
+505cab61f8ddfa614301e8f97943112739236c6bcd19ed4d1f7c6b830cab4f62
+77c47bd6ca2ce17208c9361717a5823c0cb4b5ee336a14959678e060d674ffb6
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/info/commit-graphs/graph-505cab61f8ddfa614301e8f97943112739236c6bcd19ed4d1f7c6b830cab4f62.graph differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/info/commit-graphs/graph-77c47bd6ca2ce17208c9361717a5823c0cb4b5ee336a14959678e060d674ffb6.graph differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/info/packs
@@ -1,0 +1,2 @@
+P pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.pack
+
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/pack/pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.bitmap differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/pack/pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.idx differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/pack/pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.pack differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/pack/pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.rev differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/refs/heads/master
@@ -1,0 +1,1 @@
+10d2943dc7ad88011cae3b776d9565d6451a350ce1d16949bc8546a5fe6c0a53
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/HEAD
@@ -1,0 +1,1 @@
+ref: refs/heads/main
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/config
@@ -1,0 +1,6 @@
+[extensions]
+ objectformat = sha256
+[core]
+ repositoryformatversion = 1
+ filemode = true
+ bare = true
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/info/commit-graph differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/info/packs
@@ -1,0 +1,2 @@
+P pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.pack
+
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/pack/pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.bitmap differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/pack/pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.idx differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/pack/pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.pack differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/pack/pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.rev differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/refs/heads/main
@@ -1,0 +1,1 @@
+a9ff114900e6be139ec66a2a61c930973d8c4bc6fd3b899405ee7ab8740bdbd3
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/HEAD
@@ -1,0 +1,1 @@
+ref: refs/heads/master
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/config
@@ -1,0 +1,6 @@
+[extensions]
+ objectformat = sha256
+[core]
+ repositoryformatversion = 1
+ filemode = true
+ bare = true
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/info/commit-graph differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/info/packs
@@ -1,0 +1,2 @@
+P pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.pack
+
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/pack/pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.bitmap differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/pack/pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.idx differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/pack/pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.pack differ
binary files /dev/null b/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/pack/pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.rev differ
--- /dev/null
+++ b/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/refs/heads/master
@@ -1,0 +1,1 @@
+7e396bf648e3b045c293d9fbdc533d4377d4e801d5d1fb57b84d22dd054a5860
--- a/format/commitgraph/TODO
+++ /dev/null
@@ -1,6 +1,0 @@
-Paranoia mode
-Split commit-graph chain with mixed generation and bloom setting
-Separate chunk parsing layer
-Config stuff
-
-Writing
--- a/format/commitgraph/bloom/bloom.go
+++ /dev/null
@@ -1,3 +1,0 @@
-// Package bloom provides a bloom filter implementation used for changed-path
-// filters in Git commit graphs.
-package bloom
--- a/format/commitgraph/bloom/constants.go
+++ /dev/null
@@ -1,8 +1,0 @@
-package bloom
-
-const (
- // DataHeaderSize is the size of the BDAT header in commit-graph files.
- DataHeaderSize = 3 * 4
- // DefaultMaxChange matches Git's default max-changed-paths behavior.
- DefaultMaxChange = 512
-)
--- a/format/commitgraph/bloom/contain.go
+++ /dev/null
@@ -1,25 +1,0 @@
-package bloom
-
-// MightContain reports whether the Bloom filter may contain the given path.
-//
-// Evaluated against the full path and each of its directory prefixes. A true
-// result indicates a possible match; false means the path definitely did not
-// change.
-func (f *Filter) MightContain(path []byte) (bool, error) {- if len(f.Data) == 0 {- return false, nil
- }
-
- keys, err := keyvec(path, f)
- if err != nil {- return false, err
- }
-
- for i := range keys {- if filterContainsKey(f, keys[i]) {- return true, nil
- }
- }
-
- return false, nil
-}
--- a/format/commitgraph/bloom/errors.go
+++ /dev/null
@@ -1,5 +1,0 @@
-package bloom
-
-import "errors"
-
-var ErrInvalid = errors.New("bloom: invalid data")--- a/format/commitgraph/bloom/filter.go
+++ /dev/null
@@ -1,28 +1,0 @@
-package bloom
-
-// Filter represents a changed-paths Bloom filter associated with a commit.
-//
-// The filter encodes which paths changed between a commit and its first
-// parent. Paths are expected to be in Git's slash-separated form and
-// are queried using a path and its prefixes (e.g. "a/b/c", "a/b", "a").
-type Filter struct {- Data []byte
-
- HashVersion uint32
- NumHashes uint32
- BitsPerEntry uint32
- MaxChangePaths uint32
-}
-
-// NewFilter constructs one query-ready bloom filter from raw data/settings.
-func NewFilter(data []byte, settings Settings) *Filter {- out := &Filter{- Data: data,
- HashVersion: settings.HashVersion,
- NumHashes: settings.NumHashes,
- BitsPerEntry: settings.BitsPerEntry,
- MaxChangePaths: settings.MaxChangePaths,
- }
-
- return out
-}
--- a/format/commitgraph/bloom/key.go
+++ /dev/null
@@ -1,117 +1,0 @@
-package bloom
-
-import "codeberg.org/lindenii/furgit/internal/intconv"
-
-type key struct {- hashes []uint32
-}
-
-func keyvec(path []byte, filter *Filter) ([]key, error) {- if len(path) == 0 {- return nil, nil
- }
-
- count := 1
-
- for _, b := range path {- if b == '/' {- count++
- }
- }
-
- keys := make([]key, 0, count)
-
- full, err := keyFill(path, filter)
- if err != nil {- return nil, err
- }
-
- keys = append(keys, full)
-
- for i := len(path) - 1; i >= 0; i-- {- if path[i] == '/' {- k, err := keyFill(path[:i], filter)
- if err != nil {- return nil, err
- }
-
- keys = append(keys, k)
- }
- }
-
- return keys, nil
-}
-
-func keyFill(path []byte, filter *Filter) (key, error) {- const (
- seed0 = 0x293ae76f
- seed1 = 0x7e646e2c
- )
-
- var (
- h0 uint32
- h1 uint32
- err error
- )
-
- switch filter.HashVersion {- case 2:
- h0, err = murmur3SeededV2(seed0, path)
- if err != nil {- return key{}, err- }
-
- h1, err = murmur3SeededV2(seed1, path)
- if err != nil {- return key{}, err- }
- case 1:
- h0, err = murmur3SeededV1(seed0, path)
- if err != nil {- return key{}, err- }
-
- h1, err = murmur3SeededV1(seed1, path)
- if err != nil {- return key{}, err- }
- default:
- return key{}, ErrInvalid- }
-
- hashCount, err := intconv.Uint32ToInt(filter.NumHashes)
- if err != nil {- return key{}, ErrInvalid- }
-
- hashes := make([]uint32, hashCount)
- for i := range hashCount {- iU32, err := intconv.IntToUint32(i)
- if err != nil {- return key{}, ErrInvalid- }
-
- hashes[i] = h0 + iU32*h1
- }
-
- return key{hashes: hashes}, nil-}
-
-func filterContainsKey(filter *Filter, key key) bool {- if len(filter.Data) == 0 {- return false
- }
-
- mod := uint64(len(filter.Data)) * 8
- for _, h := range key.hashes {- idx := uint64(h) % mod
- bytePos := idx / 8
-
- bit := byte(1 << (idx & 7))
- if filter.Data[bytePos]&bit == 0 {- return false
- }
- }
-
- return true
-}
--- a/format/commitgraph/bloom/murmur.go
+++ /dev/null
@@ -1,127 +1,0 @@
-package bloom
-
-import "codeberg.org/lindenii/furgit/internal/intconv"
-
-func murmur3SeededV2(seed uint32, data []byte) (uint32, error) {- const (
- c1 = 0xcc9e2d51
- c2 = 0x1b873593
- r1 = 15
- r2 = 13
- m = 5
- n = 0xe6546b64
- )
-
- h := seed
-
- nblocks := len(data) / 4
- for i := range nblocks {- k := uint32(data[4*i]) |
- (uint32(data[4*i+1]) << 8) |
- (uint32(data[4*i+2]) << 16) |
- (uint32(data[4*i+3]) << 24)
- k *= c1
- k = (k << r1) | (k >> (32 - r1))
- k *= c2
-
- h ^= k
- h = (h << r2) | (h >> (32 - r2))
- h = h*m + n
- }
-
- var k1 uint32
-
- tail := data[nblocks*4:]
- switch len(tail) & 3 {- case 3:
- k1 ^= uint32(tail[2]) << 16
-
- fallthrough
- case 2:
- k1 ^= uint32(tail[1]) << 8
-
- fallthrough
- case 1:
- k1 ^= uint32(tail[0])
- k1 *= c1
- k1 = (k1 << r1) | (k1 >> (32 - r1))
- k1 *= c2
- h ^= k1
- }
-
- dataLen, err := intconv.IntToUint32(len(data))
- if err != nil {- return 0, err
- }
-
- h ^= dataLen
- h ^= h >> 16
- h *= 0x85ebca6b
- h ^= h >> 13
- h *= 0xc2b2ae35
- h ^= h >> 16
-
- return h, nil
-}
-
-func murmur3SeededV1(seed uint32, data []byte) (uint32, error) {- const (
- c1 = 0xcc9e2d51
- c2 = 0x1b873593
- r1 = 15
- r2 = 13
- m = 5
- n = 0xe6546b64
- )
-
- h := seed
-
- nblocks := len(data) / 4
- for i := range nblocks {- k := intconv.SignExtendByteToUint32(data[4*i]) |
- (intconv.SignExtendByteToUint32(data[4*i+1]) << 8) |
- (intconv.SignExtendByteToUint32(data[4*i+2]) << 16) |
- (intconv.SignExtendByteToUint32(data[4*i+3]) << 24)
- k *= c1
- k = (k << r1) | (k >> (32 - r1))
- k *= c2
-
- h ^= k
- h = (h << r2) | (h >> (32 - r2))
- h = h*m + n
- }
-
- var k1 uint32
-
- tail := data[nblocks*4:]
- switch len(tail) & 3 {- case 3:
- k1 ^= intconv.SignExtendByteToUint32(tail[2]) << 16
-
- fallthrough
- case 2:
- k1 ^= intconv.SignExtendByteToUint32(tail[1]) << 8
-
- fallthrough
- case 1:
- k1 ^= intconv.SignExtendByteToUint32(tail[0])
- k1 *= c1
- k1 = (k1 << r1) | (k1 >> (32 - r1))
- k1 *= c2
- h ^= k1
- }
-
- dataLen, err := intconv.IntToUint32(len(data))
- if err != nil {- return 0, err
- }
-
- h ^= dataLen
- h ^= h >> 16
- h *= 0x85ebca6b
- h ^= h >> 13
- h *= 0xc2b2ae35
- h ^= h >> 16
-
- return h, nil
-}
--- a/format/commitgraph/bloom/settings.go
+++ /dev/null
@@ -1,50 +1,0 @@
-package bloom
-
-import (
- "encoding/binary"
-
- "codeberg.org/lindenii/furgit/internal/intconv"
-)
-
-// Settings describe the changed-paths Bloom filter parameters stored in
-// commit-graph BDAT chunks.
-//
-// Obviously, they must match the repository's commit-graph settings to
-// interpret filters correctly.
-type Settings struct {- HashVersion uint32
- NumHashes uint32
- BitsPerEntry uint32
- MaxChangePaths uint32
-}
-
-// ParseSettings reads Bloom filter settings from a BDAT chunk header.
-func ParseSettings(bdat []byte) (*Settings, error) {- if len(bdat) < DataHeaderSize {- return nil, ErrInvalid
- }
-
- settings := &Settings{- HashVersion: binary.BigEndian.Uint32(bdat[0:4]),
- NumHashes: binary.BigEndian.Uint32(bdat[4:8]),
- BitsPerEntry: binary.BigEndian.Uint32(bdat[8:12]),
- MaxChangePaths: DefaultMaxChange,
- }
-
- switch settings.HashVersion {- case 1, 2:
- default:
- return nil, ErrInvalid
- }
-
- if settings.NumHashes == 0 {- return nil, ErrInvalid
- }
-
- _, err := intconv.Uint32ToInt(settings.NumHashes)
- if err != nil {- return nil, ErrInvalid
- }
-
- return settings, nil
-}
--- a/format/commitgraph/constants.go
+++ /dev/null
@@ -1,32 +1,0 @@
-package commitgraph
-
-const (
- FileSignature = 0x43475048 // "CGPH"
- FileVersion = 1
-)
-
-const (
- ChunkOIDF = 0x4f494446 // "OIDF"
- ChunkOIDL = 0x4f49444c // "OIDL"
- ChunkCDAT = 0x43444154 // "CDAT"
- ChunkGDA2 = 0x47444132 // "GDA2"
- ChunkGDO2 = 0x47444f32 // "GDO2"
- ChunkEDGE = 0x45444745 // "EDGE"
- ChunkBIDX = 0x42494458 // "BIDX"
- ChunkBDAT = 0x42444154 // "BDAT"
- ChunkBASE = 0x42415345 // "BASE"
-)
-
-const (
- HeaderSize = 8
- ChunkEntrySize = 12
- FanoutSize = 256 * 4
-)
-
-const (
- ParentNone = 0x70000000
- ParentExtraMask = 0x80000000
- ParentLastMask = 0x7fffffff
-
- GenerationOverflow = 0x80000000
-)
--- a/format/commitgraph/doc.go
+++ /dev/null
@@ -1,2 +1,0 @@
-// Package commitgraph provides constants and common utilities for handling commit graphs.
-package commitgraph
--- a/format/commitgraph/read/bloom.go
+++ /dev/null
@@ -1,114 +1,0 @@
-package read
-
-import (
- "encoding/binary"
-
- "codeberg.org/lindenii/furgit/format/commitgraph/bloom"
- "codeberg.org/lindenii/furgit/internal/intconv"
-)
-
-// HasBloom reports whether any layer has changed-path Bloom data.
-func (reader *Reader) HasBloom() bool {- for i := range reader.layers {- layer := &reader.layers[i]
- if layer.chunkBloomIndex != nil && layer.chunkBloomData != nil && layer.bloomSettings != nil {- return true
- }
- }
-
- return false
-}
-
-// BloomVersion returns the changed-path Bloom hash version, or 0 if absent.
-func (reader *Reader) BloomVersion() uint8 {- for i := len(reader.layers) - 1; i >= 0; i-- {- layer := &reader.layers[i]
- if layer.bloomSettings != nil {- version, err := intconv.Uint32ToUint8(layer.bloomSettings.HashVersion)
- if err != nil {- return 0
- }
-
- return version
- }
- }
-
- return 0
-}
-
-// BloomFilterAt returns one commit's changed-path Bloom filter.
-//
-// Returns BloomUnavailableError when this commit graph has no Bloom data.
-func (reader *Reader) BloomFilterAt(pos Position) (*bloom.Filter, error) {- layer, err := reader.layerByPosition(pos)
- if err != nil {- return nil, err
- }
-
- if layer.chunkBloomIndex == nil || layer.chunkBloomData == nil || layer.bloomSettings == nil {- return nil, &BloomUnavailableError{Pos: pos}- }
-
- start, end, err := bloomRange(layer, pos.Index)
- if err != nil {- return nil, err
- }
-
- filter := bloom.NewFilter(
- layer.chunkBloomData[bloom.DataHeaderSize+start:bloom.DataHeaderSize+end],
- *layer.bloomSettings,
- )
-
- return filter, nil
-}
-
-func bloomRange(layer *layer, commitIndex uint32) (int, int, error) {- off64 := uint64(commitIndex) * 4
-
- off, err := intconv.Uint64ToInt(off64)
- if err != nil {- return 0, 0, err
- }
-
- end := binary.BigEndian.Uint32(layer.chunkBloomIndex[off : off+4])
-
- var start uint32
-
- if commitIndex > 0 {- prevOff64 := uint64(commitIndex-1) * 4
-
- prevOff, err := intconv.Uint64ToInt(prevOff64)
- if err != nil {- return 0, 0, err
- }
-
- start = binary.BigEndian.Uint32(layer.chunkBloomIndex[prevOff : prevOff+4])
- }
-
- if end < start {- return 0, 0, &MalformedError{Path: layer.path, Reason: "invalid BIDX range"}- }
-
- bdatLen := len(layer.chunkBloomData) - bloom.DataHeaderSize
-
- bdatLenU32, err := intconv.IntToUint32(bdatLen)
- if err != nil {- return 0, 0, err
- }
-
- if end > bdatLenU32 {- return 0, 0, &MalformedError{Path: layer.path, Reason: "BIDX range out of BDAT bounds"}- }
-
- startInt, err := intconv.Uint64ToInt(uint64(start))
- if err != nil {- return 0, 0, err
- }
-
- endInt, err := intconv.Uint64ToInt(uint64(end))
- if err != nil {- return 0, 0, err
- }
-
- return startInt, endInt, nil
-}
--- a/format/commitgraph/read/close.go
+++ /dev/null
@@ -1,18 +1,0 @@
-package read
-
-// Close releases all mapped commit-graph files.
-func (reader *Reader) Close() error {- var closeErr error
-
- for i := len(reader.layers) - 1; i >= 0; i-- {- err := reader.layers[i].close()
- if err != nil && closeErr == nil {- closeErr = err
- }
- }
-
- reader.layers = nil
- reader.total = 0
-
- return closeErr
-}
--- a/format/commitgraph/read/commitat.go
+++ /dev/null
@@ -1,85 +1,0 @@
-package read
-
-import (
- "encoding/binary"
-
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-// CommitAt returns decoded commit-graph metadata at one position.
-func (reader *Reader) CommitAt(pos Position) (Commit, error) {- layer, err := reader.layerByPosition(pos)
- if err != nil {- return Commit{}, err- }
-
- hashSize := reader.algo.Size()
- stride := hashSize + 16
-
- strideU64, err := intconv.IntToUint64(stride)
- if err != nil {- return Commit{}, err- }
-
- start64 := uint64(pos.Index) * strideU64
- end64 := start64 + strideU64
-
- start, err := intconv.Uint64ToInt(start64)
- if err != nil {- return Commit{}, err- }
-
- end, err := intconv.Uint64ToInt(end64)
- if err != nil {- return Commit{}, err- }
-
- record := layer.chunkCommit[start:end]
-
- treeOID, err := objectid.FromBytes(reader.algo, record[:hashSize])
- if err != nil {- return Commit{}, err- }
-
- oid, err := reader.OIDAt(pos)
- if err != nil {- return Commit{}, err- }
-
- p1 := binary.BigEndian.Uint32(record[hashSize : hashSize+4])
- p2 := binary.BigEndian.Uint32(record[hashSize+4 : hashSize+8])
- genAndTimeHi := binary.BigEndian.Uint32(record[hashSize+8 : hashSize+12])
- timeLow := binary.BigEndian.Uint32(record[hashSize+12 : hashSize+16])
-
- timeHigh := uint64(genAndTimeHi & 0x3)
- commitTimeU64 := (timeHigh << 32) | uint64(timeLow)
-
- commitTime, err := intconv.Uint64ToInt64(commitTimeU64)
- if err != nil {- return Commit{}, err- }
-
- generationV1 := genAndTimeHi >> 2
-
- generationV2, err := reader.readGenerationV2(layer, pos.Index, commitTimeU64)
- if err != nil {- return Commit{}, err- }
-
- parent1, parent2, extra, err := reader.decodeParents(layer, p1, p2)
- if err != nil {- return Commit{}, err- }
-
- return Commit{- OID: oid,
- TreeOID: treeOID,
- Parent1: parent1,
- Parent2: parent2,
- ExtraParents: extra,
- CommitTimeUnix: commitTime,
- GenerationV1: generationV1,
- GenerationV2: generationV2,
- }, nil
-}
--- a/format/commitgraph/read/commits.go
+++ /dev/null
@@ -1,20 +1,0 @@
-package read
-
-import "codeberg.org/lindenii/furgit/objectid"
-
-// Commit stores decoded commit-graph record data.
-type Commit struct {- OID objectid.ObjectID
- TreeOID objectid.ObjectID
- Parent1 ParentRef
- Parent2 ParentRef
- ExtraParents []Position
- CommitTimeUnix int64
- GenerationV1 uint32
- GenerationV2 uint64
-}
-
-// NumCommits returns total commits across loaded layers.
-func (reader *Reader) NumCommits() uint32 {- return reader.total
-}
--- a/format/commitgraph/read/doc.go
+++ /dev/null
@@ -1,2 +1,0 @@
-// Package read provides routines for reading commit graphs.
-package read
--- a/format/commitgraph/read/edges.go
+++ /dev/null
@@ -1,48 +1,0 @@
-package read
-
-import (
- "encoding/binary"
-
- "codeberg.org/lindenii/furgit/format/commitgraph"
- "codeberg.org/lindenii/furgit/internal/intconv"
-)
-
-func (reader *Reader) decodeExtraEdgeList(layer *layer, edgeStart uint32) ([]Position, error) {- if len(layer.chunkExtraEdges) == 0 {- return nil, &MalformedError{Path: layer.path, Reason: "missing EDGE chunk"}- }
-
- out := make([]Position, 0)
-
- cur := edgeStart
- for {- off64 := uint64(cur) * 4
-
- off, err := intconv.Uint64ToInt(off64)
- if err != nil {- return nil, err
- }
-
- if off+4 > len(layer.chunkExtraEdges) {- return nil, &MalformedError{Path: layer.path, Reason: "EDGE index out of range"}- }
-
- word := binary.BigEndian.Uint32(layer.chunkExtraEdges[off : off+4])
- parentGlobal := word & commitgraph.ParentLastMask
-
- parentPos, err := reader.globalToPosition(parentGlobal)
- if err != nil {- return nil, err
- }
-
- out = append(out, parentPos)
-
- if word&commitgraph.ParentExtraMask != 0 {- break
- }
-
- cur++
- }
-
- return out, nil
-}
--- a/format/commitgraph/read/errors.go
+++ /dev/null
@@ -1,58 +1,0 @@
-package read
-
-import (
- "fmt"
-
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-// NotFoundError reports a missing commit graph entry by object ID.
-type NotFoundError struct {- OID objectid.ObjectID
-}
-
-// Error implements error.
-func (err *NotFoundError) Error() string {- return fmt.Sprintf("format/commitgraph: object not found: %s", err.OID)-}
-
-// PositionOutOfRangeError reports an invalid graph position.
-type PositionOutOfRangeError struct {- Pos Position
-}
-
-// Error implements error.
-func (err *PositionOutOfRangeError) Error() string {- return fmt.Sprintf("format/commitgraph: position out of range: graph=%d index=%d", err.Pos.Graph, err.Pos.Index)-}
-
-// MalformedError reports malformed commit-graph data.
-type MalformedError struct {- Path string
- Reason string
-}
-
-// Error implements error.
-func (err *MalformedError) Error() string {- return fmt.Sprintf("format/commitgraph: malformed %q: %s", err.Path, err.Reason)-}
-
-// UnsupportedVersionError reports unsupported commit-graph version.
-type UnsupportedVersionError struct {- Version uint8
-}
-
-// Error implements error.
-func (err *UnsupportedVersionError) Error() string {- return fmt.Sprintf("format/commitgraph: unsupported version %d", err.Version)-}
-
-// BloomUnavailableError reports missing changed-path bloom data at one position.
-type BloomUnavailableError struct {- Pos Position
-}
-
-// Error implements error.
-func (err *BloomUnavailableError) Error() string {- return fmt.Sprintf("format/commitgraph: bloom unavailable at position graph=%d index=%d", err.Pos.Graph, err.Pos.Index)-}
--- a/format/commitgraph/read/generation.go
+++ /dev/null
@@ -1,43 +1,0 @@
-package read
-
-import (
- "encoding/binary"
-
- "codeberg.org/lindenii/furgit/format/commitgraph"
- "codeberg.org/lindenii/furgit/internal/intconv"
-)
-
-func (reader *Reader) readGenerationV2(layer *layer, index uint32, commitTime uint64) (uint64, error) {- if len(layer.chunkGeneration) == 0 {- return 0, nil
- }
-
- off64 := uint64(index) * 4
-
- off, err := intconv.Uint64ToInt(off64)
- if err != nil {- return 0, err
- }
-
- value := binary.BigEndian.Uint32(layer.chunkGeneration[off : off+4])
-
- if value&commitgraph.GenerationOverflow == 0 {- return commitTime + uint64(value), nil
- }
-
- gdo2Index := value ^ commitgraph.GenerationOverflow
- gdo2Off64 := uint64(gdo2Index) * 8
-
- gdo2Off, err := intconv.Uint64ToInt(gdo2Off64)
- if err != nil {- return 0, err
- }
-
- if gdo2Off+8 > len(layer.chunkGenerationOv) {- return 0, &MalformedError{Path: layer.path, Reason: "GDO2 index out of range"}- }
-
- overflow := binary.BigEndian.Uint64(layer.chunkGenerationOv[gdo2Off : gdo2Off+8])
-
- return commitTime + overflow, nil
-}
--- a/format/commitgraph/read/hash.go
+++ /dev/null
@@ -1,79 +1,0 @@
-package read
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-// HashVersion returns the commit-graph hash version.
-func (reader *Reader) HashVersion() uint8 {- return reader.hashVersion
-}
-
-func validateChainBaseHashes(algo objectid.Algorithm, chain []string, idx int, graph *layer) error {- if idx == 0 {- if len(graph.chunkBaseGraphs) != 0 {- return &MalformedError{Path: graph.path, Reason: "unexpected BASE chunk in first graph"}- }
-
- return nil
- }
-
- hashSize := algo.Size()
-
- expectedLen := idx * hashSize
- if len(graph.chunkBaseGraphs) != expectedLen {- return &MalformedError{- Path: graph.path,
- Reason: fmt.Sprintf("BASE chunk length %d does not match expected %d", len(graph.chunkBaseGraphs), expectedLen),- }
- }
-
- for i := range idx {- start := i * hashSize
- end := start + hashSize
-
- baseHash, err := objectid.FromBytes(algo, graph.chunkBaseGraphs[start:end])
- if err != nil {- return err
- }
-
- if baseHash.String() != chain[i] {- return &MalformedError{- Path: graph.path,
- Reason: fmt.Sprintf("BASE chunk mismatch at index %d", i),- }
- }
- }
-
- return nil
-}
-
-func verifyTrailerHash(data []byte, algo objectid.Algorithm, path string) error {- hashSize := algo.Size()
- if len(data) < hashSize {- return &MalformedError{Path: path, Reason: "file too short for trailer"}- }
-
- hashImpl, err := algo.New()
- if err != nil {- return err
- }
-
- _, err = io.Copy(hashImpl, bytes.NewReader(data[:len(data)-hashSize]))
- if err != nil {- return err
- }
-
- got := hashImpl.Sum(nil)
-
- want := data[len(data)-hashSize:]
- if !bytes.Equal(got, want) {- return &MalformedError{Path: path, Reason: "trailer hash mismatch"}- }
-
- return nil
-}
--- a/format/commitgraph/read/iterators.go
+++ /dev/null
@@ -1,45 +1,0 @@
-package read
-
-import (
- "iter"
-
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-// AllPositions iterates all commit positions in native layer order.
-func (reader *Reader) AllPositions() iter.Seq[Position] {- return func(yield func(Position) bool) {- for layerIdx := range reader.layers {- layer := &reader.layers[layerIdx]
-
- graph, err := intconv.IntToUint32(layerIdx)
- if err != nil {- return
- }
-
- for idx := range layer.numCommits {- if !yield(Position{Graph: graph, Index: idx}) {- return
- }
- }
- }
- }
-}
-
-// AllOIDs iterates all commit object IDs in native layer order.
-func (reader *Reader) AllOIDs() iter.Seq[objectid.ObjectID] {- return func(yield func(objectid.ObjectID) bool) {- positions := reader.AllPositions()
- for pos := range positions {- oid, err := reader.OIDAt(pos)
- if err != nil {- return
- }
-
- if !yield(oid) {- return
- }
- }
- }
-}
--- a/format/commitgraph/read/layer.go
+++ /dev/null
@@ -1,28 +1,0 @@
-package read
-
-import (
- "os"
-
- "codeberg.org/lindenii/furgit/format/commitgraph/bloom"
-)
-
-type layer struct {- path string
- file *os.File
- data []byte
- numCommits uint32
- baseCount uint32
- globalFrom uint32
-
- chunkOIDFanout []byte
- chunkOIDLookup []byte
- chunkCommit []byte
- chunkGeneration []byte
- chunkGenerationOv []byte
- chunkExtraEdges []byte
- chunkBloomIndex []byte
- chunkBloomData []byte
- chunkBaseGraphs []byte
-
- bloomSettings *bloom.Settings
-}
--- a/format/commitgraph/read/layer_close.go
+++ /dev/null
@@ -1,33 +1,0 @@
-package read
-
-import "syscall"
-
-func closeLayers(layers []layer) {- for i := len(layers) - 1; i >= 0; i-- {- _ = layers[i].close()
- }
-}
-
-func (layer *layer) close() error {- var closeErr error
-
- if layer.data != nil {- err := syscall.Munmap(layer.data)
- if err != nil {- closeErr = err
- }
-
- layer.data = nil
- }
-
- if layer.file != nil {- err := layer.file.Close()
- if err != nil && closeErr == nil {- closeErr = err
- }
-
- layer.file = nil
- }
-
- return closeErr
-}
--- a/format/commitgraph/read/layer_lookup.go
+++ /dev/null
@@ -1,53 +1,0 @@
-package read
-
-import (
- "bytes"
- "encoding/binary"
-
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-func layerLookup(layer *layer, oid objectid.ObjectID) (uint32, bool) {- hashSize := oid.Size()
- first := int(oid.RawBytes()[0])
-
- var lo uint32
- if first > 0 {- lo = binary.BigEndian.Uint32(layer.chunkOIDFanout[(first-1)*4 : first*4])
- }
-
- hi := binary.BigEndian.Uint32(layer.chunkOIDFanout[first*4 : (first+1)*4])
- if hi == 0 || lo >= hi {- return 0, false
- }
-
- target := oid.RawBytes()
- left := int(lo)
-
- right := int(hi) - 1
- for left <= right {- mid := left + (right-left)/2
- start := mid * hashSize
- end := start + hashSize
-
- current := layer.chunkOIDLookup[start:end]
-
- cmp := bytes.Compare(current, target)
- switch {- case cmp == 0:
- pos, err := intconv.IntToUint32(mid)
- if err != nil {- return 0, false
- }
-
- return pos, true
- case cmp < 0:
- left = mid + 1
- default:
- right = mid - 1
- }
- }
-
- return 0, false
-}
--- a/format/commitgraph/read/layer_open.go
+++ /dev/null
@@ -1,81 +1,0 @@
-package read
-
-import (
- "os"
- "syscall"
-
- "codeberg.org/lindenii/furgit/format/commitgraph"
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-func openLayer(root *os.Root, relPath string, algo objectid.Algorithm) (*layer, error) {- file, err := root.Open(relPath)
- if err != nil {- return nil, err
- }
-
- info, err := file.Stat()
- if err != nil {- _ = file.Close()
-
- return nil, err
- }
-
- size := info.Size()
- if size < int64(commitgraph.HeaderSize+commitgraph.FanoutSize+algo.Size()) {- _ = file.Close()
-
- return nil, &MalformedError{Path: relPath, Reason: "file too short"}- }
-
- mapLen, err := intconv.Int64ToUint64(size)
- if err != nil {- _ = file.Close()
-
- return nil, err
- }
-
- mapLenInt, err := intconv.Uint64ToInt(mapLen)
- if err != nil {- _ = file.Close()
-
- return nil, err
- }
-
- fd, err := intconv.UintptrToInt(file.Fd())
- if err != nil {- _ = file.Close()
-
- return nil, err
- }
-
- data, err := syscall.Mmap(fd, 0, mapLenInt, syscall.PROT_READ, syscall.MAP_PRIVATE)
- if err != nil {- _ = file.Close()
-
- return nil, err
- }
-
- out := &layer{- path: relPath,
- file: file,
- data: data,
- }
-
- parseErr := parseLayer(out, algo)
- if parseErr != nil {- _ = out.close()
-
- return nil, parseErr
- }
-
- verifyErr := verifyTrailerHash(out.data, algo, relPath)
- if verifyErr != nil {- _ = out.close()
-
- return nil, verifyErr
- }
-
- return out, nil
-}
--- a/format/commitgraph/read/layer_parse.go
+++ /dev/null
@@ -1,276 +1,0 @@
-package read
-
-import (
- "encoding/binary"
-
- "codeberg.org/lindenii/furgit/format/commitgraph"
- "codeberg.org/lindenii/furgit/format/commitgraph/bloom"
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-func parseLayer(layer *layer, algo objectid.Algorithm) error { //nolint:maintidx- if len(layer.data) < commitgraph.HeaderSize {- return &MalformedError{Path: layer.path, Reason: "file too short"}- }
-
- header := layer.data[:commitgraph.HeaderSize]
-
- signature := binary.BigEndian.Uint32(header[:4])
- if signature != commitgraph.FileSignature {- return &MalformedError{Path: layer.path, Reason: "invalid signature"}- }
-
- version := header[4]
- if version != commitgraph.FileVersion {- return &UnsupportedVersionError{Version: version}- }
-
- expectedHashVersion, err := intconv.Uint32ToUint8(algo.PackHashID())
- if err != nil {- return err
- }
-
- hashVersion := header[5]
- if hashVersion != expectedHashVersion {- return &MalformedError{Path: layer.path, Reason: "hash version does not match object format"}- }
-
- numChunks := int(header[6])
- baseCount := uint32(header[7])
-
- tocLen := (numChunks + 1) * commitgraph.ChunkEntrySize
- tocStart := commitgraph.HeaderSize
-
- tocEnd := tocStart + tocLen
- if tocEnd > len(layer.data) {- return &MalformedError{Path: layer.path, Reason: "truncated chunk table"}- }
-
- type tocEntry struct {- id uint32
- offset uint64
- }
-
- entries := make([]tocEntry, 0, numChunks+1)
- for i := range numChunks + 1 {- entryOff := tocStart + i*commitgraph.ChunkEntrySize
- entryData := layer.data[entryOff : entryOff+commitgraph.ChunkEntrySize]
-
- entry := tocEntry{- id: binary.BigEndian.Uint32(entryData[:4]),
- offset: binary.BigEndian.Uint64(entryData[4:]),
- }
- entries = append(entries, entry)
- }
-
- if entries[len(entries)-1].id != 0 {- return &MalformedError{Path: layer.path, Reason: "missing chunk table terminator"}- }
-
- trailerStart := len(layer.data) - algo.Size()
-
- chunks := make(map[uint32][]byte, numChunks)
- for i := range numChunks {- entry := entries[i]
- if entry.id == 0 {- return &MalformedError{Path: layer.path, Reason: "early chunk table terminator"}- }
-
- next := entries[i+1]
-
- start, err := intconv.Uint64ToInt(entry.offset)
- if err != nil {- return err
- }
-
- end, err := intconv.Uint64ToInt(next.offset)
- if err != nil {- return err
- }
-
- if start < tocEnd || end < start || end > trailerStart {- return &MalformedError{Path: layer.path, Reason: "invalid chunk offsets"}- }
-
- if _, exists := chunks[entry.id]; exists {- return &MalformedError{Path: layer.path, Reason: "duplicate chunk id"}- }
-
- chunks[entry.id] = layer.data[start:end]
- }
-
- oidf := chunks[commitgraph.ChunkOIDF]
- if len(oidf) != commitgraph.FanoutSize {- return &MalformedError{Path: layer.path, Reason: "invalid OIDF length"}- }
-
- layer.chunkOIDFanout = oidf
- layer.numCommits = binary.BigEndian.Uint32(oidf[commitgraph.FanoutSize-4:])
-
- for i := range 255 {- cur := binary.BigEndian.Uint32(oidf[i*4 : (i+1)*4])
-
- next := binary.BigEndian.Uint32(oidf[(i+1)*4 : (i+2)*4])
- if cur > next {- return &MalformedError{Path: layer.path, Reason: "non-monotonic OIDF fanout"}- }
- }
-
- hashSize := algo.Size()
-
- hashSizeU64, err := intconv.IntToUint64(hashSize)
- if err != nil {- return err
- }
-
- oidl := chunks[commitgraph.ChunkOIDL]
- oidlWantLen64 := uint64(layer.numCommits) * hashSizeU64
-
- oidlWantLen, err := intconv.Uint64ToInt(oidlWantLen64)
- if err != nil {- return err
- }
-
- if len(oidl) != oidlWantLen {- return &MalformedError{Path: layer.path, Reason: "invalid OIDL length"}- }
-
- layer.chunkOIDLookup = oidl
-
- stride := hashSize + 16
-
- strideU64, err := intconv.IntToUint64(stride)
- if err != nil {- return err
- }
-
- cdat := chunks[commitgraph.ChunkCDAT]
- cdatWantLen64 := uint64(layer.numCommits) * strideU64
-
- cdatWantLen, err := intconv.Uint64ToInt(cdatWantLen64)
- if err != nil {- return err
- }
-
- if len(cdat) != cdatWantLen {- return &MalformedError{Path: layer.path, Reason: "invalid CDAT length"}- }
-
- layer.chunkCommit = cdat
-
- gda2 := chunks[commitgraph.ChunkGDA2]
- if len(gda2) != 0 {- wantLen64 := uint64(layer.numCommits) * 4
-
- wantLen, err := intconv.Uint64ToInt(wantLen64)
- if err != nil {- return err
- }
-
- if len(gda2) != wantLen {- return &MalformedError{Path: layer.path, Reason: "invalid GDA2 length"}- }
-
- layer.chunkGeneration = gda2
- }
-
- gdo2 := chunks[commitgraph.ChunkGDO2]
- if len(gdo2) != 0 {- if len(gdo2)%8 != 0 {- return &MalformedError{Path: layer.path, Reason: "invalid GDO2 length"}- }
-
- layer.chunkGenerationOv = gdo2
- }
-
- edge := chunks[commitgraph.ChunkEDGE]
- if len(edge) != 0 {- if len(edge)%4 != 0 {- return &MalformedError{Path: layer.path, Reason: "invalid EDGE length"}- }
-
- layer.chunkExtraEdges = edge
- }
-
- base := chunks[commitgraph.ChunkBASE]
- if baseCount == 0 {- if len(base) != 0 {- return &MalformedError{Path: layer.path, Reason: "unexpected BASE chunk"}- }
- } else {- wantLen64 := uint64(baseCount) * hashSizeU64
-
- wantLen, err := intconv.Uint64ToInt(wantLen64)
- if err != nil {- return err
- }
-
- if len(base) != wantLen {- return &MalformedError{Path: layer.path, Reason: "invalid BASE length"}- }
-
- layer.chunkBaseGraphs = base
- }
-
- layer.baseCount = baseCount
-
- bidx := chunks[commitgraph.ChunkBIDX]
-
- bdat := chunks[commitgraph.ChunkBDAT]
- if len(bidx) != 0 || len(bdat) != 0 { //nolint:nestif- if len(bidx) == 0 || len(bdat) == 0 {- return &MalformedError{Path: layer.path, Reason: "BIDX/BDAT must both be present"}- }
-
- bidxWantLen64 := uint64(layer.numCommits) * 4
-
- bidxWantLen, err := intconv.Uint64ToInt(bidxWantLen64)
- if err != nil {- return err
- }
-
- if len(bidx) != bidxWantLen {- return &MalformedError{Path: layer.path, Reason: "invalid BIDX length"}- }
-
- if len(bdat) < bloom.DataHeaderSize {- return &MalformedError{Path: layer.path, Reason: "invalid BDAT length"}- }
-
- settings, err := bloom.ParseSettings(bdat)
- if err != nil {- return err
- }
-
- prev := uint32(0)
-
- for i := range layer.numCommits {- off := int(i) * 4
-
- cur := binary.BigEndian.Uint32(bidx[off : off+4])
- if i > 0 && cur < prev {- return &MalformedError{Path: layer.path, Reason: "non-monotonic BIDX"}- }
-
- bdatDataLen := len(bdat) - bloom.DataHeaderSize
-
- bdatDataLenU32, err := intconv.IntToUint32(bdatDataLen)
- if err != nil {- return err
- }
-
- if cur > bdatDataLenU32 {- return &MalformedError{Path: layer.path, Reason: "BIDX offset out of range"}- }
-
- prev = cur
- }
-
- layer.chunkBloomIndex = bidx
- layer.chunkBloomData = bdat
- layer.bloomSettings = settings
- }
-
- return nil
-}
--- a/format/commitgraph/read/layer_pos.go
+++ /dev/null
@@ -1,21 +1,0 @@
-package read
-
-import "codeberg.org/lindenii/furgit/internal/intconv"
-
-func (reader *Reader) layerByPosition(pos Position) (*layer, error) {- graphIdx, err := intconv.Uint64ToInt(uint64(pos.Graph))
- if err != nil {- return nil, err
- }
-
- if graphIdx < 0 || graphIdx >= len(reader.layers) {- return nil, &PositionOutOfRangeError{Pos: pos}- }
-
- layer := &reader.layers[graphIdx]
- if pos.Index >= layer.numCommits {- return nil, &PositionOutOfRangeError{Pos: pos}- }
-
- return layer, nil
-}
--- a/format/commitgraph/read/layerinfo.go
+++ /dev/null
@@ -1,23 +1,0 @@
-package read
-
-// LayerInfo describes one loaded commit-graph layer.
-type LayerInfo struct {- Path string
- BaseCount uint32
- Commits uint32
-}
-
-// Layers returns loaded layer metadata in native chain order.
-func (reader *Reader) Layers() []LayerInfo {- out := make([]LayerInfo, 0, len(reader.layers))
- for i := range reader.layers {- layer := reader.layers[i]
- out = append(out, LayerInfo{- Path: layer.path,
- BaseCount: layer.baseCount,
- Commits: layer.numCommits,
- })
- }
-
- return out
-}
--- a/format/commitgraph/read/lookup.go
+++ /dev/null
@@ -1,29 +1,0 @@
-package read
-
-import (
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-// Lookup resolves one object ID to one graph position.
-func (reader *Reader) Lookup(oid objectid.ObjectID) (Position, error) {- if oid.Algorithm() != reader.algo {- return Position{}, &NotFoundError{OID: oid}- }
-
- for layerIdx := len(reader.layers) - 1; layerIdx >= 0; layerIdx-- {- layer := &reader.layers[layerIdx]
-
- found, ok := layerLookup(layer, oid)
- if ok {- idxU32, err := intconv.IntToUint32(layerIdx)
- if err != nil {- return Position{}, err- }
-
- return Position{Graph: idxU32, Index: found}, nil- }
- }
-
- return Position{}, &NotFoundError{OID: oid}-}
--- a/format/commitgraph/read/mode.go
+++ /dev/null
@@ -1,11 +1,0 @@
-package read
-
-// OpenMode controls which commit-graph layout Open loads.
-type OpenMode uint8
-
-const (
- // OpenSingle opens one commit-graph file at info/commit-graph.
- OpenSingle OpenMode = iota
- // OpenChain opens chained commit-graphs from info/commit-graphs.
- OpenChain
-)
--- a/format/commitgraph/read/oidat.go
+++ /dev/null
@@ -1,36 +1,0 @@
-package read
-
-import (
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-// OIDAt returns object ID at one position.
-func (reader *Reader) OIDAt(pos Position) (objectid.ObjectID, error) {- layer, err := reader.layerByPosition(pos)
- if err != nil {- return objectid.ObjectID{}, err- }
-
- hashSize := reader.algo.Size()
-
- hashSizeU64, err := intconv.IntToUint64(hashSize)
- if err != nil {- return objectid.ObjectID{}, err- }
-
- start64 := uint64(pos.Index) * hashSizeU64
- end64 := start64 + hashSizeU64
-
- start, err := intconv.Uint64ToInt(start64)
- if err != nil {- return objectid.ObjectID{}, err- }
-
- end, err := intconv.Uint64ToInt(end64)
- if err != nil {- return objectid.ObjectID{}, err- }
-
- return objectid.FromBytes(reader.algo, layer.chunkOIDLookup[start:end])
-}
--- a/format/commitgraph/read/open.go
+++ /dev/null
@@ -1,24 +1,0 @@
-package read
-
-import (
- "fmt"
- "os"
-
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-// Open opens commit-graph data from one objects root.
-func Open(root *os.Root, algo objectid.Algorithm, mode OpenMode) (*Reader, error) {- if algo.Size() == 0 {- return nil, objectid.ErrInvalidAlgorithm
- }
-
- switch mode {- case OpenSingle:
- return openSingle(root, algo)
- case OpenChain:
- return openChain(root, algo)
- default:
- return nil, fmt.Errorf("format/commitgraph: invalid open mode %d", mode)- }
-}
--- a/format/commitgraph/read/open_chain.go
+++ /dev/null
@@ -1,133 +1,0 @@
-package read
-
-import (
- "bufio"
- "errors"
- "fmt"
- "os"
- "strings"
-
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-func openChain(root *os.Root, algo objectid.Algorithm) (*Reader, error) {- chainPath := "info/commit-graphs/commit-graph-chain"
-
- file, err := root.Open(chainPath)
- if err != nil {- if errors.Is(err, os.ErrNotExist) {- return nil, &MalformedError{Path: chainPath, Reason: "missing commit-graph-chain"}- }
-
- return nil, err
- }
-
- scanner := bufio.NewScanner(file)
- hashes := make([]string, 0)
-
- for scanner.Scan() {- line := strings.TrimSpace(scanner.Text())
- if line == "" {- continue
- }
-
- hashes = append(hashes, line)
- }
-
- scanErr := scanner.Err()
- closeErr := file.Close()
-
- if scanErr != nil {- return nil, scanErr
- }
-
- if closeErr != nil {- return nil, closeErr
- }
-
- if len(hashes) == 0 {- return nil, &MalformedError{Path: chainPath, Reason: "empty chain"}- }
-
- layers := make([]layer, 0, len(hashes))
-
- var total uint32
-
- hashVersion, err := intconv.Uint32ToUint8(algo.PackHashID())
- if err != nil {- return nil, err
- }
-
- for i, hashHex := range hashes {- expectedBaseCount, err := intconv.IntToUint32(i)
- if err != nil {- closeLayers(layers)
-
- return nil, err
- }
-
- if len(hashHex) != algo.HexLen() {- closeLayers(layers)
-
- return nil, &MalformedError{- Path: chainPath,
- Reason: fmt.Sprintf("invalid graph hash length at line %d", i+1),- }
- }
-
- relPath := fmt.Sprintf("info/commit-graphs/graph-%s.graph", hashHex)-
- loaded, loadErr := openLayer(root, relPath, algo)
- if loadErr != nil {- closeLayers(layers)
-
- return nil, loadErr
- }
-
- if loaded.baseCount != expectedBaseCount {- _ = loaded.close()
-
- closeLayers(layers)
-
- return nil, &MalformedError{- Path: relPath,
- Reason: fmt.Sprintf("BASE count %d does not match chain depth %d", loaded.baseCount, i),- }
- }
-
- validateErr := validateChainBaseHashes(algo, hashes, i, loaded)
- if validateErr != nil {- _ = loaded.close()
-
- closeLayers(layers)
-
- return nil, validateErr
- }
-
- loaded.globalFrom = total
- loaded.baseCount = expectedBaseCount
-
- totalNext := total + loaded.numCommits
- if totalNext < total {- _ = loaded.close()
-
- closeLayers(layers)
-
- return nil, &MalformedError{Path: relPath, Reason: "total commit count overflow"}- }
-
- total = totalNext
-
- layers = append(layers, *loaded)
- }
-
- out := &Reader{- algo: algo,
- hashVersion: hashVersion,
- layers: layers,
- total: total,
- }
-
- return out, nil
-}
--- a/format/commitgraph/read/open_single.go
+++ /dev/null
@@ -1,32 +1,0 @@
-package read
-
-import (
- "os"
-
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-func openSingle(root *os.Root, algo objectid.Algorithm) (*Reader, error) {- graph, err := openLayer(root, "info/commit-graph", algo)
- if err != nil {- return nil, err
- }
-
- graph.baseCount = 0
- graph.globalFrom = 0
-
- hashVersion, err := intconv.Uint32ToUint8(algo.PackHashID())
- if err != nil {- return nil, err
- }
-
- out := &Reader{- algo: algo,
- hashVersion: hashVersion,
- layers: []layer{*graph},- total: graph.numCommits,
- }
-
- return out, nil
-}
--- a/format/commitgraph/read/parents.go
+++ /dev/null
@@ -1,67 +1,0 @@
-package read
-
-import "codeberg.org/lindenii/furgit/format/commitgraph"
-
-// ParentRef references one parent position.
-type ParentRef struct {- Valid bool
- Pos Position
-}
-
-func (reader *Reader) decodeParents(layer *layer, p1, p2 uint32) (ParentRef, ParentRef, []Position, error) {- parent1, err := reader.decodeSingleParent(p1)
- if err != nil {- return ParentRef{}, ParentRef{}, nil, err- }
-
- if p2 == commitgraph.ParentNone {- return parent1, ParentRef{}, nil, nil- }
-
- if p2&commitgraph.ParentExtraMask == 0 {- parent2, err := reader.decodeSingleParent(p2)
- if err != nil {- return ParentRef{}, ParentRef{}, nil, err- }
-
- return parent1, parent2, nil, nil
- }
-
- edgeStart := p2 & commitgraph.ParentLastMask
-
- parents, err := reader.decodeExtraEdgeList(layer, edgeStart)
- if err != nil {- return ParentRef{}, ParentRef{}, nil, err- }
-
- if len(parents) == 0 {- return ParentRef{}, ParentRef{}, nil, &MalformedError{Path: layer.path, Reason: "empty EDGE list"}- }
-
- parent2 := ParentRef{Valid: true, Pos: parents[0]}- if len(parents) == 1 {- return parent1, parent2, nil, nil
- }
-
- return parent1, parent2, parents[1:], nil
-}
-
-func (reader *Reader) decodeSingleParent(raw uint32) (ParentRef, error) {- if raw == commitgraph.ParentNone {- return ParentRef{}, nil- }
-
- if raw&commitgraph.ParentExtraMask != 0 {- return ParentRef{}, &MalformedError{- Path: "commit-graph",
- Reason: "unexpected EDGE marker in single-parent slot",
- }
- }
-
- pos, err := reader.globalToPosition(raw)
- if err != nil {- return ParentRef{}, err- }
-
- return ParentRef{Valid: true, Pos: pos}, nil-}
--- a/format/commitgraph/read/position.go
+++ /dev/null
@@ -1,38 +1,0 @@
-package read
-
-import (
- "fmt"
-
- "codeberg.org/lindenii/furgit/internal/intconv"
-)
-
-// Position identifies one commit record by layer and row index.
-type Position struct {- Graph uint32
- Index uint32
-}
-
-func (reader *Reader) globalToPosition(global uint32) (Position, error) {- for i := range reader.layers {- layer := &reader.layers[i]
- from := layer.globalFrom
-
- to := from + layer.numCommits
- if global >= from && global < to {- graph, err := intconv.IntToUint32(i)
- if err != nil {- return Position{}, err- }
-
- return Position{- Graph: graph,
- Index: global - from,
- }, nil
- }
- }
-
- return Position{}, &MalformedError{- Path: "commit-graph",
- Reason: fmt.Sprintf("parent global position out of range: %d", global),- }
-}
--- a/format/commitgraph/read/read_test.go
+++ /dev/null
@@ -1,322 +1,0 @@
-package read_test
-
-import (
- "errors"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
-
- "codeberg.org/lindenii/furgit/format/commitgraph/bloom"
- "codeberg.org/lindenii/furgit/format/commitgraph/read"
- "codeberg.org/lindenii/furgit/internal/intconv"
- "codeberg.org/lindenii/furgit/internal/testgit"
- "codeberg.org/lindenii/furgit/objectid"
-)
-
-func fixtureRepoPath(t *testing.T, algo objectid.Algorithm, name string) string {- t.Helper()
-
- return filepath.Join("testdata", "fixtures", algo.String(), name, "repo.git")-}
-
-func fixtureRepo(t *testing.T, algo objectid.Algorithm, name string) *testgit.TestRepo {- t.Helper()
-
- return testgit.NewRepoFromFixture(t, algo, fixtureRepoPath(t, algo, name))
-}
-
-func TestReadSingleMatchesGit(t *testing.T) {- t.Parallel()
-
- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper- testRepo := fixtureRepo(t, algo, "single_changed")
-
- reader := openReader(t, testRepo, read.OpenSingle)
-
- defer func() { _ = reader.Close() }()-
- allIDs := testRepo.RevList(t, "--all")
- if len(allIDs) == 0 {- t.Fatal("git rev-list --all returned no commits")- }
-
- wantCommitCount, err := intconv.IntToUint32(len(allIDs))
- if err != nil {- t.Fatalf("len(allIDs) convert: %v", err)- }
-
- if got := reader.NumCommits(); got != wantCommitCount {- t.Fatalf("NumCommits() = %d, want %d", got, len(allIDs))- }
-
- if !reader.HasBloom() {- t.Fatal("HasBloom() = false, want true")- }
-
- bloomVersion := reader.BloomVersion()
- if bloomVersion == 0 {- t.Fatal("BloomVersion() = 0, want non-zero when HasBloom() is true")- }
-
- for _, id := range allIDs {- pos, err := reader.Lookup(id)
- if err != nil {- t.Fatalf("Lookup(%s): %v", id, err)- }
-
- gotID, err := reader.OIDAt(pos)
- if err != nil {- t.Fatalf("OIDAt(%+v): %v", pos, err)- }
-
- if gotID != id {- t.Fatalf("OIDAt(Lookup(%s)) = %s, want %s", id, gotID, id)- }
- }
-
- step := max(len(allIDs)/24, 1)
-
- for i, id := range allIDs {- if i%step != 0 && i != len(allIDs)-1 {- continue
- }
-
- verifyCommitAgainstGit(t, testRepo, reader, id)
- }
- })
-}
-
-func TestReadChainMatchesGit(t *testing.T) {- t.Parallel()
-
- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper- testRepo := fixtureRepo(t, algo, "chain_changed")
-
- reader := openReader(t, testRepo, read.OpenChain)
-
- defer func() { _ = reader.Close() }()-
- layers := reader.Layers()
- if len(layers) < 2 {- t.Fatalf("Layers len = %d, want >= 2", len(layers))- }
-
- allIDs := testRepo.RevList(t, "--all")
-
- wantCommitCount, err := intconv.IntToUint32(len(allIDs))
- if err != nil {- t.Fatalf("len(allIDs) convert: %v", err)- }
-
- if got := reader.NumCommits(); got != wantCommitCount {- t.Fatalf("NumCommits() = %d, want %d", got, len(allIDs))- }
-
- step := max(len(allIDs)/20, 1)
-
- for i, id := range allIDs {- pos, err := reader.Lookup(id)
- if err != nil {- t.Fatalf("Lookup(%s): %v", id, err)- }
-
- if i%step != 0 && i != len(allIDs)-1 {- continue
- }
-
- gotID, err := reader.OIDAt(pos)
- if err != nil {- t.Fatalf("OIDAt(%+v): %v", pos, err)- }
-
- if gotID != id {- t.Fatalf("OIDAt(Lookup(%s)) = %s, want %s", id, gotID, id)- }
- }
- })
-}
-
-func TestBloomUnavailableWithoutChangedPaths(t *testing.T) {- t.Parallel()
-
- testgit.ForEachAlgorithm(t, func(t *testing.T, algo objectid.Algorithm) { //nolint:thelper- testRepo := fixtureRepo(t, algo, "single_nochanged")
-
- reader := openReader(t, testRepo, read.OpenSingle)
-
- defer func() { _ = reader.Close() }()-
- head := testRepo.RevParse(t, "HEAD")
-
- pos, err := reader.Lookup(head)
- if err != nil {- t.Fatalf("Lookup(%s): %v", head, err)- }
-
- _, err = reader.BloomFilterAt(pos)
- if err == nil {- t.Fatal("BloomFilterAt() error = nil, want BloomUnavailableError")- }
-
- unavailable, ok := errors.AsType[*read.BloomUnavailableError](err)
- if !ok {- t.Fatalf("BloomFilterAt() error type = %T, want *BloomUnavailableError", err)- }
-
- if unavailable.Pos != pos {- t.Fatalf("BloomUnavailableError.Pos = %+v, want %+v", unavailable.Pos, pos)- }
- })
-}
-
-func openReader(tb testing.TB, testRepo *testgit.TestRepo, mode read.OpenMode) *read.Reader {- tb.Helper()
-
- root := testRepo.OpenObjectsRoot(tb)
-
- reader, err := read.Open(root, testRepo.Algorithm(), mode)
- if err != nil {- tb.Fatalf("read.Open(objects): %v", err)- }
-
- return reader
-}
-
-func verifyCommitAgainstGit(tb testing.TB, testRepo *testgit.TestRepo, reader *read.Reader, id objectid.ObjectID) {- tb.Helper()
-
- pos, err := reader.Lookup(id)
- if err != nil {- tb.Fatalf("Lookup(%s): %v", id, err)- }
-
- commit, err := reader.CommitAt(pos)
- if err != nil {- tb.Fatalf("CommitAt(%+v): %v", pos, err)- }
-
- if commit.OID != id {- tb.Fatalf("CommitAt(%+v).OID = %s, want %s", pos, commit.OID, id)- }
-
- treeHex := testRepo.Run(tb, "show", "-s", "--format=%T", id.String())
-
- wantTree, err := objectid.ParseHex(testRepo.Algorithm(), treeHex)
- if err != nil {- tb.Fatalf("parse tree id %q: %v", treeHex, err)- }
-
- if commit.TreeOID != wantTree {- tb.Fatalf("CommitAt(%+v).TreeOID = %s, want %s", pos, commit.TreeOID, wantTree)- }
-
- wantParents := parseOIDLine(tb, testRepo.Algorithm(), testRepo.Run(tb, "show", "-s", "--format=%P", id.String()))
-
- gotParents := commitParents(tb, reader, commit)
- if len(gotParents) != len(wantParents) {- tb.Fatalf("parent count for %s = %d, want %d", id, len(gotParents), len(wantParents))- }
-
- for i := range gotParents {- if gotParents[i] != wantParents[i] {- tb.Fatalf("parent %d for %s = %s, want %s", i, id, gotParents[i], wantParents[i])- }
- }
-
- commitTimeRaw := testRepo.Run(tb, "show", "-s", "--format=%ct", id.String())
-
- wantCommitTime, err := strconv.ParseInt(strings.TrimSpace(commitTimeRaw), 10, 64)
- if err != nil {- tb.Fatalf("parse commit time %q: %v", commitTimeRaw, err)- }
-
- if commit.CommitTimeUnix != wantCommitTime {- tb.Fatalf("CommitAt(%+v).CommitTimeUnix = %d, want %d", pos, commit.CommitTimeUnix, wantCommitTime)- }
-
- filter, err := reader.BloomFilterAt(pos)
- if err != nil {- tb.Fatalf("BloomFilterAt(%+v): %v", pos, err)- }
-
- if filter.HashVersion != uint32(reader.BloomVersion()) {- tb.Fatalf("filter.HashVersion = %d, want %d", filter.HashVersion, reader.BloomVersion())- }
-
- assertChangedPathsBloomPositive(tb, testRepo, filter, id)
-}
-
-func commitParents(tb testing.TB, reader *read.Reader, commit read.Commit) []objectid.ObjectID {- tb.Helper()
-
- out := make([]objectid.ObjectID, 0, 2+len(commit.ExtraParents))
-
- if commit.Parent1.Valid {- id, err := reader.OIDAt(commit.Parent1.Pos)
- if err != nil {- tb.Fatalf("OIDAt(parent1 %+v): %v", commit.Parent1.Pos, err)- }
-
- out = append(out, id)
- }
-
- if commit.Parent2.Valid {- id, err := reader.OIDAt(commit.Parent2.Pos)
- if err != nil {- tb.Fatalf("OIDAt(parent2 %+v): %v", commit.Parent2.Pos, err)- }
-
- out = append(out, id)
- }
-
- for _, parentPos := range commit.ExtraParents {- id, err := reader.OIDAt(parentPos)
- if err != nil {- tb.Fatalf("OIDAt(extra parent %+v): %v", parentPos, err)- }
-
- out = append(out, id)
- }
-
- return out
-}
-
-func assertChangedPathsBloomPositive(tb testing.TB, testRepo *testgit.TestRepo, filter *bloom.Filter, commitID objectid.ObjectID) {- tb.Helper()
-
- changedPaths := testRepo.Run(tb, "diff-tree", "--no-commit-id", "--name-only", "-r", "--root", commitID.String())
- for line := range strings.SplitSeq(strings.TrimSpace(changedPaths), "\n") {- path := strings.TrimSpace(line)
- if path == "" {- continue
- }
-
- mightContain, err := filter.MightContain([]byte(path))
- if err != nil {- tb.Fatalf("MightContain(%q): %v", path, err)- }
-
- if !mightContain {- tb.Fatalf("Bloom filter false negative for commit %s path %q", commitID, path)- }
- }
-}
-
-func parseOIDLine(tb testing.TB, algo objectid.Algorithm, line string) []objectid.ObjectID {- tb.Helper()
-
- toks := strings.Fields(line)
-
- out := make([]objectid.ObjectID, 0, len(toks))
- for _, tok := range toks {- id, err := objectid.ParseHex(algo, tok)
- if err != nil {- tb.Fatalf("parse object id %q: %v", tok, err)- }
-
- out = append(out, id)
- }
-
- return out
-}
--- a/format/commitgraph/read/reader.go
+++ /dev/null
@@ -1,14 +1,0 @@
-package read
-
-import "codeberg.org/lindenii/furgit/objectid"
-
-// Reader provides read-only access to one mmap-backed commit-graph snapshot.
-//
-// It is safe for concurrent read-only queries.
-type Reader struct {- algo objectid.Algorithm
- hashVersion uint8
-
- layers []layer
- total uint32
-}
--- a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/HEAD
+++ /dev/null
@@ -1,1 +1,0 @@
-ref: refs/heads/master
--- a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/config
+++ /dev/null
@@ -1,4 +1,0 @@
-[core]
- repositoryformatversion = 0
- filemode = true
- bare = true
--- a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/info/commit-graphs/commit-graph-chain
+++ /dev/null
@@ -1,2 +1,0 @@
-dd7578d5216ca76c25b19631ba90f7498aeabbe7
-bf985c21612a52070d8b008e6ef51edf8b609401
binary files a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/info/commit-graphs/graph-bf985c21612a52070d8b008e6ef51edf8b609401.graph /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/info/commit-graphs/graph-dd7578d5216ca76c25b19631ba90f7498aeabbe7.graph /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/info/packs
+++ /dev/null
@@ -1,2 +1,0 @@
-P pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.pack
-
binary files a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/pack/pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.bitmap /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/pack/pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.idx /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/pack/pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.pack /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/objects/pack/pack-15b064d6a8ef8cff520565f6db8c006b2e6f7f2f.rev /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha1/chain_changed/repo.git/refs/heads/master
+++ /dev/null
@@ -1,1 +1,0 @@
-46ca641fd65e566b8ecfa567a1f01766289192f8
--- a/format/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/HEAD
+++ /dev/null
@@ -1,1 +1,0 @@
-ref: refs/heads/main
--- a/format/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/config
+++ /dev/null
@@ -1,4 +1,0 @@
-[core]
- repositoryformatversion = 0
- filemode = true
- bare = true
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/info/commit-graph /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/info/packs
+++ /dev/null
@@ -1,2 +1,0 @@
-P pack-34e9e132566989e2abfe8821731236c77f9bcbe9.pack
-
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/pack/pack-34e9e132566989e2abfe8821731236c77f9bcbe9.bitmap /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/pack/pack-34e9e132566989e2abfe8821731236c77f9bcbe9.idx /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/pack/pack-34e9e132566989e2abfe8821731236c77f9bcbe9.pack /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/objects/pack/pack-34e9e132566989e2abfe8821731236c77f9bcbe9.rev /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha1/single_changed/repo.git/refs/heads/main
+++ /dev/null
@@ -1,1 +1,0 @@
-d02a8dbd1a8fbaac8ab7f7f1533cc312ab2c9eec
--- a/format/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/HEAD
+++ /dev/null
@@ -1,1 +1,0 @@
-ref: refs/heads/master
--- a/format/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/config
+++ /dev/null
@@ -1,4 +1,0 @@
-[core]
- repositoryformatversion = 0
- filemode = true
- bare = true
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/info/commit-graph /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/info/packs
+++ /dev/null
@@ -1,2 +1,0 @@
-P pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.pack
-
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/pack/pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.bitmap /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/pack/pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.idx /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/pack/pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.pack /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/objects/pack/pack-a3da595034c94bb16b6829d757a66b7d259b9ffc.rev /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha1/single_nochanged/repo.git/refs/heads/master
+++ /dev/null
@@ -1,1 +1,0 @@
-dda8217252bdf3e01fdf31309d0e5c3051b00945
--- a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/HEAD
+++ /dev/null
@@ -1,1 +1,0 @@
-ref: refs/heads/master
--- a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/config
+++ /dev/null
@@ -1,6 +1,0 @@
-[extensions]
- objectformat = sha256
-[core]
- repositoryformatversion = 1
- filemode = true
- bare = true
--- a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/info/commit-graphs/commit-graph-chain
+++ /dev/null
@@ -1,2 +1,0 @@
-505cab61f8ddfa614301e8f97943112739236c6bcd19ed4d1f7c6b830cab4f62
-77c47bd6ca2ce17208c9361717a5823c0cb4b5ee336a14959678e060d674ffb6
binary files a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/info/commit-graphs/graph-505cab61f8ddfa614301e8f97943112739236c6bcd19ed4d1f7c6b830cab4f62.graph /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/info/commit-graphs/graph-77c47bd6ca2ce17208c9361717a5823c0cb4b5ee336a14959678e060d674ffb6.graph /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/info/packs
+++ /dev/null
@@ -1,2 +1,0 @@
-P pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.pack
-
binary files a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/pack/pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.bitmap /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/pack/pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.idx /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/pack/pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.pack /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/objects/pack/pack-04168d0884c910f505cb9fbcf045957e44ccee06d812b5e531ae666014a26ed1.rev /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha256/chain_changed/repo.git/refs/heads/master
+++ /dev/null
@@ -1,1 +1,0 @@
-10d2943dc7ad88011cae3b776d9565d6451a350ce1d16949bc8546a5fe6c0a53
--- a/format/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/HEAD
+++ /dev/null
@@ -1,1 +1,0 @@
-ref: refs/heads/main
--- a/format/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/config
+++ /dev/null
@@ -1,6 +1,0 @@
-[extensions]
- objectformat = sha256
-[core]
- repositoryformatversion = 1
- filemode = true
- bare = true
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/info/commit-graph /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/info/packs
+++ /dev/null
@@ -1,2 +1,0 @@
-P pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.pack
-
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/pack/pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.bitmap /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/pack/pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.idx /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/pack/pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.pack /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/objects/pack/pack-316dbc67dac12d131591640da0c55b76387cbf1fd2a117ab3d7ca0d854a031c9.rev /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha256/single_changed/repo.git/refs/heads/main
+++ /dev/null
@@ -1,1 +1,0 @@
-a9ff114900e6be139ec66a2a61c930973d8c4bc6fd3b899405ee7ab8740bdbd3
--- a/format/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/HEAD
+++ /dev/null
@@ -1,1 +1,0 @@
-ref: refs/heads/master
--- a/format/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/config
+++ /dev/null
@@ -1,6 +1,0 @@
-[extensions]
- objectformat = sha256
-[core]
- repositoryformatversion = 1
- filemode = true
- bare = true
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/info/commit-graph /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/info/packs
+++ /dev/null
@@ -1,2 +1,0 @@
-P pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.pack
-
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/pack/pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.bitmap /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/pack/pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.idx /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/pack/pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.pack /dev/null differ
binary files a/format/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/objects/pack/pack-d335453f760b064e36459d780ec9bf0e5dd596c0ee1ac6310136067c4f13438b.rev /dev/null differ
--- a/format/commitgraph/read/testdata/fixtures/sha256/single_nochanged/repo.git/refs/heads/master
+++ /dev/null
@@ -1,1 +1,0 @@
-7e396bf648e3b045c293d9fbdc533d4377d4e801d5d1fb57b84d22dd054a5860
--- a/internal/commitquery/commit.go
+++ b/internal/commitquery/commit.go
@@ -1,7 +1,7 @@
package commitquery
import (
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/objectid"
)
--- a/internal/commitquery/context.go
+++ b/internal/commitquery/context.go
@@ -2,7 +2,7 @@
package commitquery
import (
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/objectid"
"codeberg.org/lindenii/furgit/objectstore"
)
--- a/internal/commitquery/graph_pos.go
+++ b/internal/commitquery/graph_pos.go
@@ -1,6 +1,6 @@
package commitquery
-import commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+import commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
// ResolveGraphPos resolves one commit-graph position to one internal query node.
func (ctx *Context) ResolveGraphPos(pos commitgraphread.Position) (NodeIndex, error) {--- a/internal/commitquery/node.go
+++ b/internal/commitquery/node.go
@@ -1,7 +1,7 @@
package commitquery
import (
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/objectid"
)
--- a/internal/commitquery/oid.go
+++ b/internal/commitquery/oid.go
@@ -4,7 +4,7 @@
stderrors "errors"
giterrors "codeberg.org/lindenii/furgit/errors"
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/object"
"codeberg.org/lindenii/furgit/objectid"
"codeberg.org/lindenii/furgit/objectstore"
--- a/internal/commitquery/parent.go
+++ b/internal/commitquery/parent.go
@@ -1,7 +1,7 @@
package commitquery
import (
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/objectid"
)
--- a/internal/testgit/repo_open_commit_graph.go
+++ b/internal/testgit/repo_open_commit_graph.go
@@ -3,7 +3,7 @@
import (
"testing"
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
)
// OpenCommitGraph opens the repository commit-graph and registers cleanup on
--- a/mergebase/base.go
+++ b/mergebase/base.go
@@ -1,7 +1,7 @@
package mergebase
import (
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/objectid"
"codeberg.org/lindenii/furgit/objectstore"
)
--- a/mergebase/mergebase.go
+++ b/mergebase/mergebase.go
@@ -2,7 +2,7 @@
package mergebase
import (
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/objectid"
"codeberg.org/lindenii/furgit/objectstore"
)
--- a/mergebase/query.go
+++ b/mergebase/query.go
@@ -1,7 +1,7 @@
package mergebase
import (
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/objectid"
"codeberg.org/lindenii/furgit/objectstore"
)
--- a/reachability/reachability.go
+++ b/reachability/reachability.go
@@ -2,7 +2,7 @@
package reachability
import (
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/objectstore"
)
--- a/reachability/walk_expand_commits_graph.go
+++ b/reachability/walk_expand_commits_graph.go
@@ -3,7 +3,7 @@
import (
"errors"
- commitgraphread "codeberg.org/lindenii/furgit/format/commitgraph/read"
+ commitgraphread "codeberg.org/lindenii/furgit/commitgraph/read"
"codeberg.org/lindenii/furgit/objectid"
"codeberg.org/lindenii/furgit/objecttype"
)
--
⑨