Skip to content

Commit

Permalink
Support Acquire-By-Hash for index files
Browse files Browse the repository at this point in the history
The added "aptly publish repo" option "-access-by-hash" additionally publishes
the index files (Packages*, Sources*) as hardlinks named after their checksums
under a by-hash/ directory.
Example:
 /dists/yakkety/main/binary-amd64/by-hash/SHA512/31833ec39acc...
The Release files indicate this with the option "Acquire-By-Hash: yes"

This is used by apt >= 1.2.0 and prevents the "Hash sum mismatch" race
condition between a server side "aptly publish repo" and "apt-get update"
on a client.
See: http://www.chiark.greenend.org.uk/~cjwatson/blog/no-more-hash-sum-mismatch-errors.html

This implementation uses symlinks in the by-hash/*/ directories to keep
only the two most recent versions of each index file, deleting older
files automatically.

Note: this only works with aptly.FileSystemPublishedStorage

Closes: aptly-dev#536

Signed-off-by: André Roth <neolynx@gmail.com>
  • Loading branch information
neolynx authored and Oliver Sauder committed Nov 17, 2017
1 parent 9c018ce commit 5b056d3
Show file tree
Hide file tree
Showing 5 changed files with 93 additions and 3 deletions.
5 changes: 5 additions & 0 deletions api/publish.go
Original file line number Diff line number Diff line change
Expand Up @@ -238,6 +238,7 @@ func apiPublishUpdateSwitch(c *gin.Context) {
Component string `binding:"required"`
Name string `binding:"required"`
}
AccessByHash *bool
}

if c.Bind(&b) != nil {
Expand Down Expand Up @@ -317,6 +318,10 @@ func apiPublishUpdateSwitch(c *gin.Context) {
published.SkipContents = *b.SkipContents
}

if b.AccessByHash != nil {
published.AccessByHash = *b.AccessByHash
}

err = published.Publish(context.PackagePool(), context, context.CollectionFactory(), signer, nil, b.ForceOverwrite)
if err != nil {
c.AbortWithError(500, fmt.Errorf("unable to update: %s", err))
Expand Down
1 change: 1 addition & 0 deletions cmd/publish_repo.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ Example:
cmd.Flag.String("butautomaticupgrades", "", "set value for ButAutomaticUpgrades field")
cmd.Flag.String("label", "", "label to publish")
cmd.Flag.Bool("force-overwrite", false, "overwrite files in package pool in case of mismatch")
cmd.Flag.Bool("access-by-hash", false, "provide index files by hash also")

return cmd
}
4 changes: 4 additions & 0 deletions cmd/publish_snapshot.go
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,10 @@ func aptlyPublishSnapshotOrRepo(cmd *commander.Command, args []string) error {
published.SkipContents = context.Flags().Lookup("skip-contents").Value.Get().(bool)
}

if context.Flags().IsSet("access-by-hash") {
published.AccessByHash = context.Flags().Lookup("access-by-hash").Value.Get().(bool)
}

duplicate := context.CollectionFactory().PublishedRepoCollection().CheckDuplicate(published)
if duplicate != nil {
context.CollectionFactory().PublishedRepoCollection().LoadComplete(duplicate, context.CollectionFactory())
Expand Down
75 changes: 73 additions & 2 deletions deb/index_files.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"bufio"
"fmt"
"os"
"path"
"path/filepath"
"strings"

Expand All @@ -20,6 +21,7 @@ type indexFiles struct {
tempDir string
suffix string
indexes map[string]*indexFile
accessByHash bool
}

type indexFile struct {
Expand All @@ -28,6 +30,7 @@ type indexFile struct {
compressable bool
onlyGzip bool
signable bool
accessByHash bool
relativePath string
tempFilename string
tempFile *os.File
Expand Down Expand Up @@ -91,11 +94,24 @@ func (file *indexFile) Finalize(signer pgp.Signer) error {
file.parent.generatedFiles[file.relativePath+ext] = checksumInfo
}

err = file.parent.publishedStorage.MkDir(filepath.Dir(filepath.Join(file.parent.basePath, file.relativePath)))
filedir := filepath.Dir(filepath.Join(file.parent.basePath, file.relativePath))

err = file.parent.publishedStorage.MkDir(filedir)
if err != nil {
return fmt.Errorf("unable to create dir: %s", err)
}

hashs := []string{}
if file.accessByHash {
hashs = append(hashs, "MD5", "SHA1", "SHA256", "SHA512")
for _, hash := range hashs {
err = file.parent.publishedStorage.MkDir(filepath.Join(filedir, "by-hash", hash))
if err != nil {
return fmt.Errorf("unable to create dir: %s", err)
}
}
}

for _, ext := range exts {
err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+ext),
file.tempFilename+ext)
Expand All @@ -107,6 +123,29 @@ func (file *indexFile) Finalize(signer pgp.Signer) error {
file.parent.renameMap[filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+ext)] =
filepath.Join(file.parent.basePath, file.relativePath+ext)
}

if file.accessByHash {
sums := file.parent.generatedFiles[file.relativePath+ext]
storage := file.parent.publishedStorage.(aptly.FileSystemPublishedStorage).PublicPath()
src := filepath.Join(storage, file.parent.basePath, file.relativePath)

err = packageIndexByHash(src, file.parent.suffix, ext, storage, filedir, "SHA512", sums.SHA512)
if err != nil {
fmt.Printf("%s\n", err)
}
err = packageIndexByHash(src, file.parent.suffix, ext, storage, filedir, "SHA256", sums.SHA256)
if err != nil {
fmt.Printf("%s\n", err)
}
err = packageIndexByHash(src, file.parent.suffix, ext, storage, filedir, "SHA1", sums.SHA1)
if err != nil {
fmt.Printf("%s\n", err)
}
err = packageIndexByHash(src, file.parent.suffix, ext, storage, filedir, "MD5", sums.MD5)
if err != nil {
fmt.Printf("%s\n", err)
}
}
}

if file.signable && signer != nil {
Expand Down Expand Up @@ -143,7 +182,35 @@ func (file *indexFile) Finalize(signer pgp.Signer) error {
return nil
}

func newIndexFiles(publishedStorage aptly.PublishedStorage, basePath, tempDir, suffix string) *indexFiles {
// packageIndexByHash publishes one index file under by-hash/<hash>/ so that
// apt clients (>= 1.2.0) can fetch it via Acquire-By-Hash and avoid the
// "Hash sum mismatch" race between server-side publish and "apt-get update".
//
// src is the final path of the index file without suffix/extension;
// src+suffix+ext is the concrete file just written by the publisher.
// storage is the filesystem root of the published storage, filedir the
// index file's directory relative to storage, hash the checksum algorithm
// name (e.g. "SHA512") and sum the file's checksum value.
//
// Layout maintained in <filedir>/by-hash/<hash>/:
//   - <sum> is a hardlink to the published index file;
//   - a symlink named after the index file points at the current sum;
//   - "<index>.old" is a symlink to the previous generation, whose target
//     is deleted when it rotates out (only two generations are kept).
func packageIndexByHash(src string, suffix string, ext string, storage string, filedir string, hash string, sum string) error {
	indexfile := path.Base(src + ext)
	src = src + suffix + ext
	dst := filepath.Join(storage, filedir, "by-hash", hash)
	sumfile := filepath.Join(dst, sum)

	// Already published under this checksum — nothing to do.
	if _, err := os.Stat(sumfile); err == nil {
		return nil
	}

	if err := os.Link(src, sumfile); err != nil {
		return fmt.Errorf("Access-By-Hash: error creating hardlink %s: %s", sumfile, err)
	}

	current := filepath.Join(dst, indexfile)
	old := current + ".old"

	if _, err := os.Stat(current); err == nil {
		// Drop the generation before the previous one. Removals are best
		// effort: a stale by-hash file is harmless, so errors are ignored,
		// but we only remove the Readlink target if Readlink succeeded.
		if _, err := os.Stat(old); err == nil {
			if target, err := os.Readlink(old); err == nil {
				os.Remove(filepath.Join(dst, target))
			}
			os.Remove(old)
		}
		// Keep the current generation reachable as "<index>.old". If this
		// fails, the Symlink below would fail anyway — report it directly.
		if err := os.Rename(current, old); err != nil {
			return fmt.Errorf("Access-By-Hash: error renaming %s: %s", current, err)
		}
	}

	// Relative symlink target: the sum file lives in the same directory.
	if err := os.Symlink(sum, current); err != nil {
		return fmt.Errorf("Access-By-Hash: error creating symlink %s: %s", current, err)
	}
	return nil
}

func newIndexFiles(publishedStorage aptly.PublishedStorage, basePath, tempDir, suffix string, accessByHash bool) *indexFiles {
return &indexFiles{
publishedStorage: publishedStorage,
basePath: basePath,
Expand All @@ -152,6 +219,7 @@ func newIndexFiles(publishedStorage aptly.PublishedStorage, basePath, tempDir, s
tempDir: tempDir,
suffix: suffix,
indexes: make(map[string]*indexFile),
accessByHash: accessByHash,
}
}

Expand Down Expand Up @@ -179,6 +247,7 @@ func (files *indexFiles) PackageIndex(component, arch string, udeb bool) *indexF
discardable: false,
compressable: true,
signable: false,
accessByHash: files.accessByHash,
relativePath: relativePath,
}

Expand Down Expand Up @@ -212,6 +281,7 @@ func (files *indexFiles) ReleaseIndex(component, arch string, udeb bool) *indexF
discardable: udeb,
compressable: false,
signable: false,
accessByHash: files.accessByHash,
relativePath: relativePath,
}

Expand Down Expand Up @@ -242,6 +312,7 @@ func (files *indexFiles) ContentsIndex(component, arch string, udeb bool) *index
compressable: true,
onlyGzip: true,
signable: false,
accessByHash: files.accessByHash,
relativePath: relativePath,
}

Expand Down
11 changes: 10 additions & 1 deletion deb/publish.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,9 @@ type PublishedRepo struct {

// True if repo is being re-published
rePublishing bool

// Provide index files per hash also
AccessByHash bool
}

// ParsePrefix splits [storage:]prefix into components
Expand Down Expand Up @@ -556,7 +559,7 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
}
defer os.RemoveAll(tempDir)

indexes := newIndexFiles(publishedStorage, basePath, tempDir, suffix)
indexes := newIndexFiles(publishedStorage, basePath, tempDir, suffix, p.AccessByHash)

for component, list := range lists {
hadUdebs := false
Expand Down Expand Up @@ -683,6 +686,9 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
release["Component"] = component
release["Origin"] = p.GetOrigin()
release["Label"] = p.GetLabel()
if p.AccessByHash {
release["Acquire-By-Hash"] = "yes"
}

var bufWriter *bufio.Writer
bufWriter, err = indexes.ReleaseIndex(component, arch, udeb).BufWriter()
Expand Down Expand Up @@ -720,6 +726,9 @@ func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageP
release["Codename"] = p.Distribution
release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{ArchitectureSource}), " ")
if p.AccessByHash {
release["Acquire-By-Hash"] = "yes"
}
release["Description"] = " Generated by aptly\n"
release["MD5Sum"] = ""
release["SHA1"] = ""
Expand Down

0 comments on commit 5b056d3

Please sign in to comment.