Consume SpecificVariantCompressor in CandidateTemplateWithCompression
Support consuming zstd:chunked cache entries; return either
zstd:chunked or zstd, based on what the user wants.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
mtrmac committed Jul 27, 2024
1 parent ceabfec commit 6256d87
Showing 3 changed files with 28 additions and 9 deletions.
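
The caller-visible effect of this commit: a blob-info-cache lookup that requires zstd:chunked can now return a reusable candidate (together with the annotations needed to consume the chunked blob) instead of always forcing re-compression. Below is a minimal sketch of such a lookup, loosely modeled on the updated test in this commit; the cache, transport, scope and blobDigest values are assumed to come from the surrounding code and are not part of the change itself.

    // Sketch only, not part of this commit.
    res := cache.CandidateLocations2(transport, scope, blobDigest, blobinfocache.CandidateLocations2Options{
        CanSubstitute:       true,
        RequiredCompression: &compression.ZstdChunked, // or &compression.Zstd to accept the base variant
    })
    for _, c := range res {
        // With this change, a zstd:chunked cache entry can match; its CompressionAnnotations
        // carry the metadata needed to reuse the chunked blob as-is.
        _ = c.CompressionAnnotations
    }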
5 changes: 0 additions & 5 deletions internal/manifest/manifest.go
@@ -205,11 +205,6 @@ type ReuseConditions struct {
 // (which can be nil to represent uncompressed or unknown) matches reuseConditions.
 func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
     if c.RequiredCompression != nil {
-        if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
-            // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
-            // The caller must re-compress to build those annotations.
-            return false
-        }
         if candidateCompression == nil ||
             (c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
             return false
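
With the zstd:chunked early return gone, the check above reduces to comparing the required name against the candidate's name or its base variant (the PossibleManifestFormats part of ReuseConditions is unchanged and not shown here). A rough illustration of the matching that now applies; matches is a hypothetical helper written only for this example, and zstdChunked / zstd stand for the corresponding compressiontypes.Algorithm values.

    // Sketch only, mirroring the remaining RequiredCompression check.
    matches := func(required, candidate *compressiontypes.Algorithm) bool {
        if required == nil {
            return true
        }
        return candidate != nil &&
            (required.Name() == candidate.Name() || required.Name() == candidate.BaseVariantName())
    }
    // matches(&zstdChunked, &zstdChunked) == true  (previously forced to false by the HACK)
    // matches(&zstd, &zstdChunked)        == true  (zstd is the base variant of zstd:chunked)
    // matches(&zstdChunked, &zstd)        == false (a plain zstd blob has no chunked metadata)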
27 changes: 25 additions & 2 deletions pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -66,13 +66,37 @@ func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocation
         logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
         return nil // Not allowed with CandidateLocations2
     default:
+        // See if we can use the specific variant, first.
+        if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+            algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor)
+            if err != nil {
+                logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v",
+                    data.SpecificVariantCompressor, digest.String(), err)
+            } else {
+                if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+                    PossibleManifestFormats: v2Options.PossibleManifestFormats,
+                    RequiredCompression:     v2Options.RequiredCompression,
+                }, &algo) {
+                    logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v",
+                        data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
+                } else {
+                    return &blobinfocache.BICReplacementCandidate2{
+                        Digest:                 digest,
+                        CompressionOperation:   types.Compress,
+                        CompressionAlgorithm:   &algo,
+                        CompressionAnnotations: data.SpecificVariantAnnotations,
+                    }
+                }
+            }
+        }
+
+        // Try the base variant.
         algo, err := compression.AlgorithmByName(data.BaseVariantCompressor)
         if err != nil {
             logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
                 digest.String(), data.BaseVariantCompressor, err)
             return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
         }
-
         if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
             PossibleManifestFormats: v2Options.PossibleManifestFormats,
             RequiredCompression:     v2Options.RequiredCompression,
@@ -81,7 +105,6 @@
                 digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats)
             return nil
         }
-
         return &blobinfocache.BICReplacementCandidate2{
             Digest:               digest,
             CompressionOperation: types.Compress,
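
In short, the template now prefers the specific variant recorded in the cache (typically zstd:chunked, together with its annotations) when it satisfies the reuse conditions, and otherwise falls back to the base variant (zstd) exactly as before. A simplified restatement of that selection, with logging, error details and the manifest-format check elided; pickVariant is illustrative only, and the DigestCompressorData parameter type is an assumption about the shape of data used above.

    // Sketch only, not the actual implementation.
    func pickVariant(data blobinfocache.DigestCompressorData, required *compressiontypes.Algorithm) (compressiontypes.Algorithm, map[string]string, bool) {
        if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
            if algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor); err == nil &&
                manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{RequiredCompression: required}, &algo) {
                // The specific variant wins, and its annotations come along so the chunked blob can be consumed.
                return algo, data.SpecificVariantAnnotations, true
            }
        }
        if algo, err := compression.AlgorithmByName(data.BaseVariantCompressor); err == nil &&
            manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{RequiredCompression: required}, &algo) {
            // Fall back to the base variant (e.g. plain zstd), with no extra annotations.
            return algo, nil, true
        }
        return compressiontypes.Algorithm{}, nil, false
    }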
5 changes: 3 additions & 2 deletions pkg/blobinfocache/internal/test/test.go
@@ -564,8 +564,9 @@ func testGenericCandidateLocations2(t *testing.T, cache blobinfocache.BlobInfoCa
         CanSubstitute:       true,
         RequiredCompression: &compression.ZstdChunked,
     })
-    // Right now, zstd:chunked requests never match a candidate, see CandidateCompressionMatchesReuseConditions().
-    assertCandidatesMatch2(t, scopeName, []candidate{}, res)
+    assertCandidatesMatch2(t, scopeName, []candidate{
+        {d: digestZstdChunked, cn: compressiontypes.ZstdChunkedAlgorithmName, lr: "zstdChunked"},
+    }, res)
     res = cache.CandidateLocations2(transport, scope, digestFilteringUncompressed, blobinfocache.CandidateLocations2Options{
         CanSubstitute:       true,
         RequiredCompression: &compression.Zstd,
