
Commit 3cad6c0

Merge pull request moby#6076 from tonistiigi/cache-debuginfo-db-remote

debug: allow debug from remote cache config

2 parents 84e0b4e + 854eace · commit 3cad6c0

File tree: 6 files changed (+152 −52)

cache/remotecache/v1/cachestorage.go — 26 additions & 1 deletion

@@ -106,7 +106,12 @@ func (cs *cacheKeyStorage) Exists(id string) bool {
 	return ok
 }
 
-func (cs *cacheKeyStorage) Walk(func(id string) error) error {
+func (cs *cacheKeyStorage) Walk(cb func(id string) error) error {
+	for id := range cs.byID {
+		if err := cb(id); err != nil {
+			return err
+		}
+	}
 	return nil
 }
 
@@ -142,6 +147,26 @@ func (cs *cacheKeyStorage) Release(resultID string) error {
 func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error {
 	return nil
 }
+
+func (cs *cacheKeyStorage) WalkLinksAll(id string, fn func(id string, link solver.CacheInfoLink) error) error {
+	it, ok := cs.byID[id]
+	if !ok {
+		return nil
+	}
+	for nl, ids := range it.links {
+		for _, id2 := range ids {
+			if err := fn(id2, solver.CacheInfoLink{
+				Input:    solver.Index(nl.input),
+				Selector: digest.Digest(nl.selector),
+				Digest:   nl.dgst,
+			}); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
 func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error {
 	it, ok := cs.byID[id]
 	if !ok {

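The new Walk and WalkLinksAll methods make the imported remote cache enumerable through the same calls the debug handlers use. A minimal sketch of walking such a storage; the walkableStorage interface and dumpKeys helper are illustrative and not part of the commit, only the method signatures come from the diff above:

```go
// Sketch: enumerate every cache key and its outgoing links via the
// Walk/WalkLinksAll pair added above. walkableStorage is a hypothetical
// constraint; the concrete value would be the cacheimport key storage.
package debugdump

import (
	"fmt"

	"github.com/moby/buildkit/solver"
)

type walkableStorage interface {
	Walk(func(id string) error) error
	WalkLinksAll(id string, fn func(id string, link solver.CacheInfoLink) error) error
}

func dumpKeys(store walkableStorage) error {
	return store.Walk(func(id string) error {
		fmt.Println("key:", id)
		return store.WalkLinksAll(id, func(target string, link solver.CacheInfoLink) error {
			fmt.Printf("  -> %s (input %d, digest %s)\n", target, link.Input, link.Digest)
			return nil
		})
	})
}
```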
cmd/buildkitd/debug.go — 78 additions & 19 deletions

@@ -1,7 +1,9 @@
 package main
 
 import (
+	"cmp"
 	"context"
+	"encoding/binary"
 	"encoding/json"
 	"expvar"
 	"fmt"
@@ -15,6 +17,7 @@ import (
 	"strings"
 	"time"
 
+	cacheimport "github.com/moby/buildkit/cache/remotecache/v1"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/cachedigest"
@@ -40,6 +43,7 @@ func setupDebugHandlers(addr string) error {
 	m.Handle("/debug/cache/all", http.HandlerFunc(handleCacheAll))
 	m.Handle("/debug/cache/lookup", http.HandlerFunc(handleCacheLookup))
 	m.Handle("/debug/cache/store", http.HandlerFunc(handleDebugCacheStore))
+	m.Handle("POST /debug/cache/load", http.HandlerFunc(handleCacheLoad))
 
 	m.Handle("/debug/gc", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
 		runtime.GC()
@@ -139,9 +143,13 @@ func printCacheRecord(record *cachedigest.Record, w io.Writer) {
 		case cachedigest.FrameIDData:
 			w.Write([]byte(" " + frame.ID.String() + ": " + string(frame.Data) + "\n"))
 		case cachedigest.FrameIDSkip:
-			w.Write([]byte(" skipping " + string(frame.Data) + " bytes\n"))
+			fmt.Fprintf(w, " skipping %d bytes\n", binary.LittleEndian.Uint32(frame.Data))
 		}
 	}
+	for _, subRec := range record.SubRecords {
+		w.Write([]byte("\n"))
+		printCacheRecord(subRec, w)
+	}
 }
 
 func cacheRecordLookup(ctx context.Context, dgst digest.Digest) (*cachedigest.Record, error) {
@@ -216,18 +224,70 @@ func loadCacheAll(ctx context.Context) ([]*cachedigest.Record, error) {
 	return records, nil
 }
 
+func handleCacheLoad(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodPost {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+	if r.Body == nil {
+		http.Error(w, "body is required", http.StatusBadRequest)
+		return
+	}
+	defer r.Body.Close()
+
+	recs, err := loadCacheFromReader(r.Context(), r.Body)
+	if err != nil {
+		http.Error(w, "failed to load cache: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	writeCacheRecordsResponse(w, r, recs)
+}
+
+func loadCacheFromReader(ctx context.Context, rdr io.Reader) ([]*recordWithDebug, error) {
+	dt, err := io.ReadAll(rdr)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to read body")
+	}
+
+	allLayers := cacheimport.DescriptorProvider{}
+	cc := cacheimport.NewCacheChains()
+	if err := cacheimport.Parse(dt, allLayers, cc); err != nil {
+		return nil, err
+	}
+
+	keyStorage, _, err := cacheimport.NewCacheKeyStorage(cc, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	recs, err := debugCacheStore(ctx, keyStorage)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to debug cache store")
+	}
+
+	return recs, nil
+}
+
 func handleDebugCacheStore(w http.ResponseWriter, r *http.Request) {
 	if r.Method != http.MethodGet {
 		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
 		return
 	}
 
-	recs, err := debugCacheStore(r.Context())
+	store := cacheStoreForDebug
+	if store == nil {
+		http.Error(w, "Cache store is not initialized for debug", http.StatusInternalServerError)
+	}
+
+	recs, err := debugCacheStore(r.Context(), store)
 	if err != nil {
 		http.Error(w, "Failed to debug cache store: "+err.Error(), http.StatusInternalServerError)
 		return
 	}
+	writeCacheRecordsResponse(w, r, recs)
+}
 
+func writeCacheRecordsResponse(w http.ResponseWriter, r *http.Request, recs []*recordWithDebug) {
 	w.WriteHeader(http.StatusOK)
 
 	switch r.Header.Get("Accept") {
@@ -250,24 +310,28 @@ func handleDebugCacheStore(w http.ResponseWriter, r *http.Request) {
 		if rec.Digest != "" {
 			fmt.Fprintf(w, "Digest: %s\n", rec.Digest)
 		}
+
 		if len(rec.Parents) > 0 {
 			fmt.Fprintln(w, "Parents:")
-			for input := range rec.Parents {
-				ids := slices.Collect(maps.Keys(rec.ParentIDs[input]))
-				s := make([]string, len(ids))
-				for i, id := range ids {
-					s[i] = fmt.Sprintf("%d", id)
+			slices.SortStableFunc(rec.Parents, func(i, j cachestore.Link) int {
+				return cmp.Or(cmp.Compare(i.Input, j.Input), cmp.Compare(i.Digest, j.Digest))
+			})
+			for _, parent := range rec.Parents {
+				fmt.Fprintf(w, " Input %d:\t%d\t%s\n", parent.Input, parent.Record.ID, parent.Digest)
+				if parent.Selector != "" {
+					fmt.Fprintf(w, " Selector: %s\n", parent.Selector)
 				}
-				fmt.Fprintf(w, " Input %d:\t %s\n", input, strings.Join(s, ", "))
 			}
 		}
 		if len(rec.Children) > 0 {
 			fmt.Fprintln(w, "Children:")
-			for _, child := range rec.Children {
-				fmt.Fprintf(w, " %d %s (input %d, output %d)\n", child.Record.ID, child.Digest, child.Input, child.Output)
-				if child.Selector != "" {
-					fmt.Fprintf(w, " Selector: %s\n", child.Selector)
+			for input := range rec.Children {
+				ids := slices.Collect(maps.Keys(rec.ChildIDs[input]))
+				s := make([]string, len(ids))
+				for i, id := range ids {
+					s[i] = fmt.Sprintf("%d", id)
 				}
+				fmt.Fprintf(w, " Input %d:\t %s\n", input, strings.Join(s, ", "))
 			}
 		}
 		if len(rec.Debug) > 0 {
@@ -287,12 +351,7 @@ type recordWithDebug struct {
 	Debug []*cachedigest.Record `json:"debug,omitempty"`
 }
 
-func debugCacheStore(ctx context.Context) ([]*recordWithDebug, error) {
-	store := cacheStoreForDebug
-	if store == nil {
-		return nil, errors.New("cache store is not initialized for debug")
-	}
-
+func debugCacheStore(ctx context.Context, store solver.CacheKeyStorage) ([]*recordWithDebug, error) {
 	recs, err := cachestore.Records(ctx, store)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to get cache records")
@@ -307,7 +366,7 @@ func debugCacheStore(ctx context.Context) ([]*recordWithDebug, error) {
 		if rec.Digest != "" {
 			m[rec.Digest] = nil
 		}
-		for _, link := range rec.Children {
+		for _, link := range rec.Parents {
 			m[link.Digest] = nil
 			if link.Selector != "" {
 				m[link.Selector] = nil

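With the handler above registered, a remote cache config blob can be posted straight to the debug endpoint and the parsed records come back in the same format as /debug/cache/store. A hedged usage sketch, assuming the debug handlers listen on 127.0.0.1:6060 and that cacheconfig.json holds an exported cache config in the v1 cacheimport format (both the address and the filename are assumptions, not from the commit):

```go
// Sketch: POST an exported cache config to the new /debug/cache/load endpoint
// and print the plain-text record dump that comes back.
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	f, err := os.Open("cacheconfig.json") // hypothetical exported cache config
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Debug address is an assumption; use whatever --debugaddr buildkitd runs with.
	resp, err := http.Post("http://127.0.0.1:6060/debug/cache/load", "application/json", f)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	os.Stdout.Write(out)
}
```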
solver/bboltcachestorage/storage.go — 2 additions & 0 deletions

@@ -366,6 +366,8 @@ func (s *Store) WalkLinksAll(id string, fn func(id string, link solver.CacheInfo
 			if err := json.Unmarshal(parts[0], &link); err != nil {
 				return err
 			}
+			// make digest relative to output as not all backends store output separately
+			link.Digest = digest.FromBytes(fmt.Appendf(nil, "%s@%d", link.Digest, link.Output))
 			links = append(links, linkEntry{
 				id:   string(parts[1]),
 				link: link,

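A minimal sketch of the output-relative digest computed in the two added lines, pulled into a standalone helper (outputRelativeDigest is a hypothetical name; only the digest.FromBytes/fmt.Appendf expression comes from the diff). Folding the output index into the digest keeps links distinguishable for backends that do not record the output separately:

```go
// Sketch: combine a link digest with its output index into one digest,
// mirroring the expression added to WalkLinksAll above.
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func outputRelativeDigest(dgst digest.Digest, output int) digest.Digest {
	return digest.FromBytes(fmt.Appendf(nil, "%s@%d", dgst, output))
}

func main() {
	d := digest.FromString("example-link")
	fmt.Println(outputRelativeDigest(d, 0))
	fmt.Println(outputRelativeDigest(d, 1)) // differs from output 0
}
```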
util/cachedigest/db_test.go — 1 addition & 1 deletion

@@ -95,7 +95,7 @@ func TestNewHashAndGet(t *testing.T) {
 			dataFrames = append(dataFrames, f.Data)
 		case FrameIDSkip:
 			require.Len(t, f.Data, 4)
-			skipLens = append(skipLens, uint32(f.Data[0])<<24|uint32(f.Data[1])<<16|uint32(f.Data[2])<<8|uint32(f.Data[3]))
+			skipLens = append(skipLens, uint32(f.Data[3])<<24|uint32(f.Data[2])<<16|uint32(f.Data[1])<<8|uint32(f.Data[0]))
 		}
 	}
 	require.Len(t, dataFrames, len(inputs))

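The test now decodes the skip-frame length with the byte order reversed, i.e. little-endian. A small standalone sketch (not from the commit) showing that the manual shift expression matches binary.LittleEndian.Uint32, which is what digest.go now writes:

```go
// Sketch: the manual little-endian decode used in the test is equivalent
// to binary.LittleEndian.Uint32.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	data := []byte{0x2a, 0x00, 0x00, 0x00} // 42 encoded little-endian
	manual := uint32(data[3])<<24 | uint32(data[2])<<16 | uint32(data[1])<<8 | uint32(data[0])
	fmt.Println(manual, binary.LittleEndian.Uint32(data)) // both print 42
}
```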
util/cachedigest/digest.go — 11 additions & 6 deletions

@@ -67,11 +67,11 @@ func (h *Hash) WriteNoDebug(p []byte) (n int, err error) {
 	if n > 0 && h.db != nil {
 		if len(h.frames) > 0 && h.frames[len(h.frames)-1].ID == FrameIDSkip {
 			last := &h.frames[len(h.frames)-1]
-			prevLen := binary.BigEndian.Uint32(last.Data)
-			binary.BigEndian.PutUint32(last.Data, prevLen+uint32(n))
+			prevLen := binary.LittleEndian.Uint32(last.Data)
+			binary.LittleEndian.PutUint32(last.Data, prevLen+uint32(n))
 		} else {
 			lenBytes := make([]byte, 4)
-			binary.BigEndian.PutUint32(lenBytes, uint32(n))
+			binary.LittleEndian.PutUint32(lenBytes, uint32(n))
 			h.frames = append(h.frames, Frame{ID: FrameIDSkip, Data: lenBytes})
 		}
 	}
@@ -94,7 +94,7 @@ type Record struct {
 	Digest     digest.Digest `json:"digest"`
 	Type       Type          `json:"type"`
 	Data       []Frame       `json:"data,omitempty"`
-	SubRecords []Record      `json:"subRecords,omitempty"`
+	SubRecords []*Record     `json:"subRecords,omitempty"`
 }
 
 var shaRegexpOnce = sync.OnceValue(func() *regexp.Regexp {
@@ -149,11 +149,16 @@ func (r *Record) LoadSubRecords(loader func(d digest.Digest) (Type, []Frame, err
 			bklog.L.Warnf("failed to load sub-record for %s: %v", dgst, err)
 			continue
 		}
-		r.SubRecords = append(r.SubRecords, Record{
+		rr := &Record{
 			Digest: digest.Digest(dgst),
 			Type:   typ,
 			Data:   frames,
-		})
+		}
+		if err := rr.LoadSubRecords(loader); err != nil {
+			return err
+		}
+
+		r.SubRecords = append(r.SubRecords, rr)
 	}
 	return nil
 }

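WriteNoDebug keeps a running byte count in the trailing FrameIDSkip frame, now encoded little-endian. A simplified sketch of that bookkeeping with stand-in types (the frame struct and appendSkip helper are illustrative; only the binary.LittleEndian calls mirror the diff):

```go
// Sketch: successive "skipped" writes extend the most recent skip frame's
// little-endian length instead of appending a new frame each time.
package main

import (
	"encoding/binary"
	"fmt"
)

type frame struct {
	id   string
	data []byte
}

func appendSkip(frames []frame, n int) []frame {
	if len(frames) > 0 && frames[len(frames)-1].id == "skip" {
		last := &frames[len(frames)-1]
		prev := binary.LittleEndian.Uint32(last.data)
		binary.LittleEndian.PutUint32(last.data, prev+uint32(n))
		return frames
	}
	lenBytes := make([]byte, 4)
	binary.LittleEndian.PutUint32(lenBytes, uint32(n))
	return append(frames, frame{id: "skip", data: lenBytes})
}

func main() {
	var frames []frame
	frames = appendSkip(frames, 10)
	frames = appendSkip(frames, 5)                          // merged into the same skip frame
	fmt.Println(binary.LittleEndian.Uint32(frames[0].data)) // 15
}
```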
util/cachestore/store.go — 34 additions & 25 deletions

@@ -1,7 +1,10 @@
 package cachestore
 
 import (
+	"cmp"
 	"context"
+	"maps"
+	"slices"
 	"strings"
 
 	"github.com/moby/buildkit/solver"
@@ -10,12 +13,12 @@ import (
 )
 
 type Record struct {
-	ID        int                          `json:"id"`
-	Parents   map[int]map[*Record]struct{} `json:"-"`
-	Children  []Link                       `json:"children,omitempty"`
-	Digest    digest.Digest                `json:"digest,omitempty"`
-	Random    bool                         `json:"random,omitempty"`
-	ParentIDs map[int]map[int]struct{}     `json:"parents,omitempty"`
+	ID       int                          `json:"id"`
+	Parents  []Link                       `json:"parents,omitempty"`
+	Children map[int]map[*Record]struct{} `json:"-"`
+	Digest   digest.Digest                `json:"digest,omitempty"`
+	Random   bool                         `json:"random,omitempty"`
+	ChildIDs map[int]map[int]struct{}     `json:"children,omitempty"`
 }
 
 type Link struct {
@@ -35,7 +38,7 @@ type storeWithLinks interface {
 func Records(ctx context.Context, store solver.CacheKeyStorage) ([]*Record, error) {
 	swl, ok := store.(storeWithLinks)
 	if !ok {
-		return nil, errors.New("cache store does not support walkin all links")
+		return nil, errors.New("cache store does not support walking all links")
 	}
 
 	roots := []string{}
@@ -72,16 +75,16 @@ func Records(ctx context.Context, store solver.CacheKeyStorage) ([]*Record, erro
 }
 
 func setLinkIDs(rec *Record) {
-	for i, child := range rec.Children {
-		child.ID = child.Record.ID
-		rec.Children[i] = child
+	for i, parent := range rec.Parents {
+		parent.ID = parent.Record.ID
+		rec.Parents[i] = parent
 	}
-	if rec.Parents != nil {
-		rec.ParentIDs = make(map[int]map[int]struct{})
-		for input, m := range rec.Parents {
-			rec.ParentIDs[input] = make(map[int]struct{})
-			for parent := range m {
-				rec.ParentIDs[input][parent.ID] = struct{}{}
+	if rec.Children != nil {
+		rec.ChildIDs = make(map[int]map[int]struct{})
+		for input, m := range rec.Children {
+			rec.ChildIDs[input] = make(map[int]struct{})
+			for child := range m {
+				rec.ChildIDs[input][child.ID] = struct{}{}
 			}
 		}
 	}
@@ -93,8 +96,14 @@ func setIndex(rec *Record, arr []*Record) []*Record {
 	}
 	arr = append(arr, rec)
 	rec.ID = len(arr)
-	for _, child := range rec.Children {
-		arr = setIndex(child.Record, arr)
+	for _, links := range rec.Children {
+		recs := slices.Collect(maps.Keys(links))
+		slices.SortFunc(recs, func(i, j *Record) int {
+			return cmp.Compare(i.Digest, j.Digest)
+		})
+		for _, child := range recs {
+			arr = setIndex(child, arr)
+		}
 	}
 	return arr
 }
@@ -122,23 +131,23 @@ func loadRecord(ctx context.Context, store storeWithLinks, id string, out map[st
 		if err != nil {
 			return errors.Wrapf(err, "failed to load link %s for %s", linkID, id)
 		}
-		rec.Children = append(rec.Children, Link{
+		child.Parents = append(child.Parents, Link{
 			Input:    int(link.Input),
 			Output:   int(link.Output),
 			Selector: link.Selector,
-			Record:   child,
+			Record:   rec,
 			Digest:   link.Digest,
 		})
 
-		if child.Parents == nil {
-			child.Parents = make(map[int]map[*Record]struct{})
+		if rec.Children == nil {
+			rec.Children = make(map[int]map[*Record]struct{})
 		}
-		m, ok := child.Parents[int(link.Input)]
+		m, ok := rec.Children[int(link.Output)]
 		if !ok {
 			m = make(map[*Record]struct{})
-			child.Parents[int(link.Input)] = m
+			rec.Children[int(link.Output)] = m
 		}
-		m[rec] = struct{}{}
+		m[child] = struct{}{}
 		return nil
 	})
 	if err != nil {

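The Record type now carries parents as a []Link and children as an ID index, and the debug output in debug.go sorts parents by input and then by digest with cmp.Or. A standalone sketch of that ordering, using a trimmed stand-in for cachestore.Link (not the real type):

```go
// Sketch: deterministic ordering of a record's parent links, as applied in
// the debug output above: sort by input index first, then by link digest.
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type link struct {
	Input  int
	Digest string
}

func main() {
	parents := []link{
		{Input: 1, Digest: "sha256:bbb"},
		{Input: 0, Digest: "sha256:ccc"},
		{Input: 0, Digest: "sha256:aaa"},
	}
	slices.SortStableFunc(parents, func(i, j link) int {
		return cmp.Or(cmp.Compare(i.Input, j.Input), cmp.Compare(i.Digest, j.Digest))
	})
	fmt.Println(parents) // [{0 sha256:aaa} {0 sha256:ccc} {1 sha256:bbb}]
}
```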