fix(server): Use uncompressed tarball hashes in image config

Docker expects hashes of compressed tarballs in the manifest (these
are used to fetch layers from the content-addressable blob store),
but the hashes in the image configuration must be those of the
uncompressed tarballs, which serve as the layers' diff IDs.

To achieve this an additional SHA256 hash is calculated while
creating the layer tarballs, before the data is passed to the gzip
writer.
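
As a standalone sketch of that approach (illustration only, not code
from this commit; all names below are made up): the uncompressed tar
stream is teed through an io.MultiWriter, so one copy feeds a SHA256
hash while the other feeds the gzip writer, yielding both digests in
a single pass.

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	// Stand-in for an uncompressed layer tarball.
	tarball := []byte("uncompressed layer tarball contents")

	var compressed bytes.Buffer
	tarHash := sha256.New()

	// The uncompressed stream is teed: one copy feeds the hash,
	// the other feeds the gzip writer.
	gz := gzip.NewWriter(&compressed)
	w := io.MultiWriter(tarHash, gz)

	if _, err := w.Write(tarball); err != nil {
		panic(err)
	}
	if err := gz.Close(); err != nil {
		panic(err)
	}

	// Goes into the image config (as a diff ID):
	fmt.Printf("tar hash:  sha256:%x\n", tarHash.Sum(nil))
	// Goes into the manifest (as the layer digest):
	fmt.Printf("gzip hash: sha256:%x\n", sha256.Sum256(compressed.Bytes()))
}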

In the current setup the symlink layer is first compressed and then
decompressed again to calculate its uncompressed hash. This can be
refactored in a future change.
Author: Vincent Ambo
Date:   2019-10-11 11:57:14 +01:00 (committed by Vincent Ambo)
Parent: 7fc9718032
Commit: f57a2baf92

4 changed files with 42 additions and 16 deletions


@@ -137,11 +137,14 @@ let
   symlinkLayerMeta = fromJSON (readFile (runCommand "symlink-layer-meta.json" {
     buildInputs = with pkgs; [ coreutils jq openssl ];
   }''
-    layerSha256=$(sha256sum ${symlinkLayer} | cut -d ' ' -f1)
+    gzipHash=$(sha256sum ${symlinkLayer} | cut -d ' ' -f1)
+    tarHash=$(cat ${symlinkLayer} | gzip -d | sha256sum | cut -d ' ' -f1)
     layerSize=$(stat --printf '%s' ${symlinkLayer})
 
-    jq -n -c --arg sha256 $layerSha256 --arg size $layerSize --arg path ${symlinkLayer} \
-      '{ size: ($size | tonumber), sha256: $sha256, path: $path }' >> $out
+    jq -n -c --arg gzipHash $gzipHash --arg tarHash $tarHash --arg size $layerSize \
+      --arg path ${symlinkLayer} \
+      '{ size: ($size | tonumber), tarHash: $tarHash, gzipHash: $gzipHash, path: $path }' \
+      >> $out
   ''));
 
   # Final output structure returned to Nixery if the build succeeded


@@ -10,6 +10,8 @@ package builder
 import (
 	"archive/tar"
 	"compress/gzip"
+	"crypto/sha256"
+	"fmt"
 	"io"
 	"os"
 	"path/filepath"
@@ -19,26 +21,31 @@ import (
 
 // Create a new compressed tarball from each of the paths in the list
 // and write it to the supplied writer.
-func packStorePaths(l *layers.Layer, w io.Writer) error {
+//
+// The uncompressed tarball is hashed because image manifests must
+// contain both the hashes of compressed and uncompressed layers.
+func packStorePaths(l *layers.Layer, w io.Writer) (string, error) {
+	shasum := sha256.New()
 	gz := gzip.NewWriter(w)
-	t := tar.NewWriter(gz)
+	multi := io.MultiWriter(shasum, gz)
+	t := tar.NewWriter(multi)
 
 	for _, path := range l.Contents {
 		err := filepath.Walk(path, tarStorePath(t))
 		if err != nil {
-			return err
+			return "", err
 		}
 	}
 
 	if err := t.Close(); err != nil {
-		return err
+		return "", err
 	}
 
 	if err := gz.Close(); err != nil {
-		return err
+		return "", err
 	}
 
-	return nil
+	return fmt.Sprintf("sha256:%x", shasum.Sum([]byte{})), nil
 }
 
 func tarStorePath(w *tar.Writer) filepath.WalkFunc {


@@ -117,9 +117,10 @@ type ImageResult struct {
 
 	// These fields are populated in case of success
 	Graph        layers.RuntimeGraph `json:"runtimeGraph"`
 	SymlinkLayer struct {
-		Size   int    `json:"size"`
-		SHA256 string `json:"sha256"`
-		Path   string `json:"path"`
+		Size     int    `json:"size"`
+		TarHash  string `json:"tarHash"`
+		GzipHash string `json:"gzipHash"`
+		Path     string `json:"path"`
 	} `json:"symlinkLayer"`
 }
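
This struct deserialises the JSON emitted by the jq invocation in the
Nix hunk above. A minimal standalone sketch of that contract, with
made-up values (not code from this commit):

package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as the SymlinkLayer field above.
type symlinkLayer struct {
	Size     int    `json:"size"`
	TarHash  string `json:"tarHash"`
	GzipHash string `json:"gzipHash"`
	Path     string `json:"path"`
}

func main() {
	// Example values in the shape produced by the jq call above.
	raw := `{"size":10240,"tarHash":"cafe...","gzipHash":"f00d...","path":"/nix/store/example-symlink-layer"}`

	var sl symlinkLayer
	if err := json.Unmarshal([]byte(raw), &sl); err != nil {
		panic(err)
	}
	fmt.Printf("gzip (manifest): %s, tar (config): %s\n", sl.GzipHash, sl.TarHash)
}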
@@ -269,8 +270,18 @@ func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageRes
 			entries = append(entries, *entry)
 		} else {
 			lh := l.Hash()
+
+			// While packing store paths, the SHA sum of
+			// the uncompressed layer is computed and
+			// written to `tarhash`.
+			//
+			// TODO(tazjin): Refactor this to make the
+			// flow of data cleaner.
+			var tarhash string
 			lw := func(w io.Writer) error {
-				return packStorePaths(&l, w)
+				var err error
+				tarhash, err = packStorePaths(&l, w)
+				return err
 			}
 
 			entry, err := uploadHashLayer(ctx, s, lh, lw)
@@ -278,6 +289,7 @@ func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageRes
 				return nil, err
 			}
 			entry.MergeRating = l.MergeRating
+			entry.TarHash = tarhash
 
 			var pkgs []string
 			for _, p := range l.Contents {
@@ -287,6 +299,7 @@ func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageRes
 			log.WithFields(log.Fields{
 				"layer":    lh,
 				"packages": pkgs,
+				"tarhash":  tarhash,
 			}).Info("created image layer")
 
 			go cacheLayer(ctx, s, l.Hash(), *entry)
@@ -296,7 +309,7 @@ func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageRes
 
 	// Symlink layer (built in the first Nix build) needs to be
 	// included here manually:
-	slkey := result.SymlinkLayer.SHA256
+	slkey := result.SymlinkLayer.GzipHash
 	entry, err := uploadHashLayer(ctx, s, slkey, func(w io.Writer) error {
 		f, err := os.Open(result.SymlinkLayer.Path)
 		if err != nil {
@@ -318,6 +331,7 @@ func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageRes
 		return nil, err
 	}
 
+	entry.TarHash = "sha256:" + result.SymlinkLayer.TarHash
 	go cacheLayer(ctx, s, slkey, *entry)
 	entries = append(entries, *entry)


@@ -29,9 +29,10 @@ type Entry struct {
 	Size   int64  `json:"size"`
 	Digest string `json:"digest"`
 
-	// This field is internal to Nixery and not part of the
+	// These fields are internal to Nixery and not part of the
 	// serialised entry.
 	MergeRating uint64 `json:"-"`
+	TarHash     string `json:",omitempty"`
 }
 
 type manifest struct {
@@ -102,9 +103,10 @@ func Manifest(layers []Entry) (json.RawMessage, ConfigLayer) {
 	hashes := make([]string, len(layers))
 	for i, l := range layers {
+		hashes[i] = l.TarHash
 		l.MediaType = layerType
+		l.TarHash = ""
 		layers[i] = l
-		hashes[i] = l.Digest
 	}
 
 	c := configLayer(hashes)
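
configLayer itself is not part of this diff. As a rough, hypothetical
sketch based on the OCI image config format (where rootfs.diff_ids
holds the uncompressed tar hashes), the collected hashes would end up
roughly as follows, while each entry's Digest remains in the manifest;
the type names here are illustrative, not Nixery's:

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical shape of the image config's rootfs section.
type rootFS struct {
	Type    string   `json:"type"`
	DiffIDs []string `json:"diff_ids"`
}

type imageConfig struct {
	Architecture string `json:"architecture"`
	OS           string `json:"os"`
	RootFS       rootFS `json:"rootfs"`
}

func main() {
	// Uncompressed tar hashes collected from the layer entries.
	tarHashes := []string{"sha256:aaaa...", "sha256:bbbb..."}

	cfg := imageConfig{
		Architecture: "amd64",
		OS:           "linux",
		RootFS:       rootFS{Type: "layers", DiffIDs: tarHashes},
	}

	out, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}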