From 5b5eeb795cdbda408c3dd26f6b48b8e5d99a1c42 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Fri, 28 Mar 2025 13:50:37 +1300 Subject: [PATCH 01/14] Refactor fetching of base ISO Separate the non-agent-specific parts out into the rhcos package, leaving the agent-specific parts behind in the agent/image package. --- .../image/overrides/os_image_override.txt | 2 +- pkg/asset/agent/image/agentartifacts.go | 11 +- pkg/asset/agent/image/agentimage.go | 2 +- pkg/asset/agent/image/baseiso.go | 191 +++--------------- pkg/asset/agent/image/ignition.go | 24 +-- pkg/asset/agent/image/oc_test.go | 4 +- .../agent/image/unconfigured_ignition.go | 4 +- pkg/asset/agent/mirror/registriesconf.go | 40 ++++ pkg/asset/rhcos/iso.go | 177 ++++++++++++++++ .../baseiso_test.go => rhcos/iso_test.go} | 72 +------ .../{agent/image => rhcos}/releaseextract.go | 151 ++++++-------- 11 files changed, 335 insertions(+), 343 deletions(-) create mode 100644 pkg/asset/rhcos/iso.go rename pkg/asset/{agent/image/baseiso_test.go => rhcos/iso_test.go} (62%) rename pkg/asset/{agent/image => rhcos}/releaseextract.go (69%) diff --git a/cmd/openshift-install/testdata/agent/image/overrides/os_image_override.txt b/cmd/openshift-install/testdata/agent/image/overrides/os_image_override.txt index fb4ddc7f38b..5ec00ae879a 100644 --- a/cmd/openshift-install/testdata/agent/image/overrides/os_image_override.txt +++ b/cmd/openshift-install/testdata/agent/image/overrides/os_image_override.txt @@ -6,7 +6,7 @@ env OPENSHIFT_INSTALL_OS_IMAGE_OVERRIDE= ! exists $WORK/agent.x86_64.iso stderr 'level=warning msg=Found override for OS Image. 
Please be warned, this is not advised' -stderr 'level=fatal msg=failed to fetch Agent Installer ISO: failed to fetch dependency of "Agent Installer ISO": failed to fetch dependency of "Agent Installer Artifacts": failed to generate asset "BaseIso Image": failed to get base ISO image: parse "": invalid URI for request' +stderr 'level=fatal msg=failed to fetch Agent Installer ISO: failed to fetch dependency of "Agent Installer ISO": failed to fetch dependency of "Agent Installer Artifacts": failed to generate asset "Base ISO Image": failed to get base ISO image: parse "": invalid URI for request' -- install-config.yaml -- apiVersion: v1 diff --git a/pkg/asset/agent/image/agentartifacts.go b/pkg/asset/agent/image/agentartifacts.go index f6208876264..a96dc80f704 100644 --- a/pkg/asset/agent/image/agentartifacts.go +++ b/pkg/asset/agent/image/agentartifacts.go @@ -20,6 +20,7 @@ import ( "github.com/openshift/installer/pkg/asset/agent/mirror" "github.com/openshift/installer/pkg/asset/agent/workflow" workflowreport "github.com/openshift/installer/pkg/asset/agent/workflow/report" + "github.com/openshift/installer/pkg/asset/rhcos" ) const ( @@ -115,7 +116,7 @@ func (a *AgentArtifacts) Generate(ctx context.Context, dependencies asset.Parent if err := workflowreport.GetReport(ctx).SubStage(workflow.StageAgentArtifactsAgentTUI); err != nil { return err } - agentTuiFiles, err = a.fetchAgentTuiFiles(agentManifests.ClusterImageSet.Spec.ReleaseImage, agentManifests.GetPullSecretData(), registriesConf.MirrorConfig) + agentTuiFiles, err = a.fetchAgentTuiFiles(agentManifests.ClusterImageSet.Spec.ReleaseImage, agentManifests.GetPullSecretData(), registriesConf) if err != nil { return err } @@ -132,10 +133,10 @@ func (a *AgentArtifacts) Generate(ctx context.Context, dependencies asset.Parent return nil } -func (a *AgentArtifacts) fetchAgentTuiFiles(releaseImage string, pullSecret string, mirrorConfig []mirror.RegistriesConfig) ([]string, error) { - release := NewRelease( - 
Config{MaxTries: OcDefaultTries, RetryDelay: OcDefaultRetryDelay}, - releaseImage, pullSecret, mirrorConfig, nil) +func (a *AgentArtifacts) fetchAgentTuiFiles(releaseImage string, pullSecret string, mirrorConfig rhcos.MirrorConfig) ([]string, error) { + release := rhcos.NewReleasePayload( + rhcos.ExtractConfig{}, + releaseImage, pullSecret, mirrorConfig) agentTuiFilenames := []string{"/usr/bin/agent-tui", "/usr/lib64/libnmstate.so.*"} files := []string{} diff --git a/pkg/asset/agent/image/agentimage.go b/pkg/asset/agent/image/agentimage.go index e481492a474..71bcbf97ba9 100644 --- a/pkg/asset/agent/image/agentimage.go +++ b/pkg/asset/agent/image/agentimage.go @@ -106,7 +106,7 @@ func (a *AgentImage) Generate(ctx context.Context, dependencies asset.Parents) e logrus.Debugf("Using custom rootfs URL: %s", a.rootFSURL) } else { // Default to the URL from the RHCOS streams file - defaultRootFSURL, err := baseIso.getRootFSURL(ctx, a.cpuArch) + defaultRootFSURL, err := baseIso.getRootFSURL(ctx, a.cpuArch, agentWorkflow, clusterInfo) if err != nil { return err } diff --git a/pkg/asset/agent/image/baseiso.go b/pkg/asset/agent/image/baseiso.go index 97dcfa6d46c..adfdb426173 100644 --- a/pkg/asset/agent/image/baseiso.go +++ b/pkg/asset/agent/image/baseiso.go @@ -3,12 +3,8 @@ package image import ( "context" "fmt" - "io/fs" "os" - "os/exec" - "time" - "github.com/coreos/stream-metadata-go/arch" "github.com/coreos/stream-metadata-go/stream" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -19,83 +15,30 @@ import ( "github.com/openshift/installer/pkg/asset/agent/manifests" "github.com/openshift/installer/pkg/asset/agent/mirror" "github.com/openshift/installer/pkg/asset/agent/workflow" - workflowreport "github.com/openshift/installer/pkg/asset/agent/workflow/report" - "github.com/openshift/installer/pkg/rhcos" - "github.com/openshift/installer/pkg/rhcos/cache" - "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/asset/rhcos" ) // BaseIso 
generates the base ISO file for the image type BaseIso struct { - File *asset.File - streamGetter CoreOSBuildFetcher - ocRelease Release + File *asset.File + ocRelease rhcos.ReleasePayload } -// CoreOSBuildFetcher will be to used to switch the source of the coreos metadata. -type CoreOSBuildFetcher func(ctx context.Context) (*stream.Stream, error) - var ( baseIsoFilename = "" - // DefaultCoreOSStreamGetter uses the pinned metadata. - DefaultCoreOSStreamGetter = rhcos.FetchCoreOSBuild ) var _ asset.WritableAsset = (*BaseIso)(nil) // Name returns the human-friendly name of the asset. func (i *BaseIso) Name() string { - return "BaseIso Image" -} - -func (i *BaseIso) getMetalArtifact(ctx context.Context, archName string) (stream.PlatformArtifacts, error) { - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - // Get the ISO to use from rhcos.json - st, err := i.streamGetter(ctx) - if err != nil { - return stream.PlatformArtifacts{}, err - } - - streamArch, err := st.GetArchitecture(archName) - if err != nil { - return stream.PlatformArtifacts{}, err - } - - metal, ok := streamArch.Artifacts["metal"] - if !ok { - return stream.PlatformArtifacts{}, fmt.Errorf("coreOs stream data not found for 'metal' artifact") - } - - return metal, nil -} - -// Download the ISO using the URL in rhcos.json. -func (i *BaseIso) downloadIso(ctx context.Context, archName string) (string, error) { - metal, err := i.getMetalArtifact(ctx, archName) - if err != nil { - return "", err - } - - format, ok := metal.Formats["iso"] - if !ok { - return "", fmt.Errorf("no ISO found to download for %s", archName) - } - - url := format.Disk.Location - sha := format.Disk.Sha256 - cachedImage, err := cache.DownloadImageFileWithSha(url, cache.AgentApplicationName, sha) - if err != nil { - return "", errors.Wrapf(err, "failed to download base ISO image %s", url) - } - - return cachedImage, nil + return "Base ISO Image" } // Fetch RootFS URL using the rhcos.json. 
-func (i *BaseIso) getRootFSURL(ctx context.Context, archName string) (string, error) { - metal, err := i.getMetalArtifact(ctx, archName) +func (i *BaseIso) getRootFSURL(ctx context.Context, archName string, agentWorkflow *workflow.AgentWorkflow, clusterInfo *joiner.ClusterInfo) (string, error) { + metal, err := rhcos.GetMetalArtifact( + ctx, archName, customStreamGetter(agentWorkflow, clusterInfo)) if err != nil { return "", err } @@ -119,45 +62,17 @@ func (i *BaseIso) Dependencies() []asset.Asset { } } -func (i *BaseIso) checkReleasePayloadBaseISOVersion(ctx context.Context, r Release, archName string) { - logrus.Debugf("Checking release payload base ISO version") - - // Get current release payload CoreOS version - payloadRelease, err := r.GetBaseIsoVersion(archName) - if err != nil { - logrus.Warnf("unable to determine base ISO version: %s", err.Error()) - return - } - - // Get pinned version from installer - metal, err := i.getMetalArtifact(ctx, archName) - if err != nil { - logrus.Warnf("unable to determine base ISO version: %s", err.Error()) - return - } - - // Check for a mismatch - if metal.Release != payloadRelease { - logrus.Warnf("base ISO version mismatch in release payload. Expected version %s but found %s", metal.Release, payloadRelease) - } -} - // Generate the baseIso func (i *BaseIso) Generate(ctx context.Context, dependencies asset.Parents) error { - var err error - var baseIsoFileName string - - if err := workflowreport.GetReport(ctx).Stage(workflow.StageFetchBaseISO); err != nil { - return err - } + agentManifests := &manifests.AgentManifests{} + registriesConf := &mirror.RegistriesConf{} + agentWorkflow := &workflow.AgentWorkflow{} + clusterInfo := &joiner.ClusterInfo{} + dependencies.Get(agentManifests, registriesConf, agentWorkflow, clusterInfo) - if urlOverride, ok := os.LookupEnv("OPENSHIFT_INSTALL_OS_IMAGE_OVERRIDE"); ok && urlOverride != "" { - logrus.Warn("Found override for OS Image. 
Please be warned, this is not advised") - baseIsoFileName, err = cache.DownloadImageFile(urlOverride, cache.AgentApplicationName) - } else { - i.setStreamGetter(dependencies) - baseIsoFileName, err = i.retrieveBaseIso(ctx, dependencies) - } + baseIsoFileName, err := rhcos.NewBaseISOFetcher( + i.getRelease(agentManifests, registriesConf), + customStreamGetter(agentWorkflow, clusterInfo)).GetBaseISOFilename(ctx, agentManifests.InfraEnv.Spec.CpuArchitecture) if err == nil { logrus.Debugf("Using base ISO image %s", baseIsoFileName) @@ -169,88 +84,34 @@ func (i *BaseIso) Generate(ctx context.Context, dependencies asset.Parents) erro return errors.Wrap(err, "failed to get base ISO image") } -func (i *BaseIso) setStreamGetter(dependencies asset.Parents) { - if i.streamGetter != nil { - return - } - - agentWorkflow := &workflow.AgentWorkflow{} - clusterInfo := &joiner.ClusterInfo{} - dependencies.Get(agentWorkflow, clusterInfo) - - i.streamGetter = DefaultCoreOSStreamGetter +func customStreamGetter(agentWorkflow *workflow.AgentWorkflow, clusterInfo *joiner.ClusterInfo) rhcos.CoreOSBuildFetcher { if agentWorkflow.Workflow == workflow.AgentWorkflowTypeAddNodes { - i.streamGetter = func(ctx context.Context) (*stream.Stream, error) { + return func(ctx context.Context) (*stream.Stream, error) { return clusterInfo.OSImage, nil } } + return nil } -func (i *BaseIso) getRelease(agentManifests *manifests.AgentManifests, registriesConf *mirror.RegistriesConf) Release { +func (i *BaseIso) getRelease(agentManifests *manifests.AgentManifests, registriesConf *mirror.RegistriesConf) rhcos.ReleasePayload { if i.ocRelease != nil { return i.ocRelease } + if agentManifests.ClusterImageSet == nil { + return nil + } + releaseImage := agentManifests.ClusterImageSet.Spec.ReleaseImage pullSecret := agentManifests.GetPullSecretData() - i.ocRelease = NewRelease( - Config{MaxTries: OcDefaultTries, RetryDelay: OcDefaultRetryDelay}, - releaseImage, pullSecret, registriesConf.MirrorConfig, 
i.streamGetter) + i.ocRelease = rhcos.NewReleasePayload( + rhcos.ExtractConfig{}, + releaseImage, pullSecret, registriesConf) return i.ocRelease } -func (i *BaseIso) retrieveBaseIso(ctx context.Context, dependencies asset.Parents) (string, error) { - // use the GetIso function to get the BaseIso from the release payload - agentManifests := &manifests.AgentManifests{} - registriesConf := &mirror.RegistriesConf{} - dependencies.Get(agentManifests, registriesConf) - - // Default iso archName to x86_64. - archName := arch.RpmArch(types.ArchitectureAMD64) - - if agentManifests.ClusterImageSet != nil { - // If specified, use InfraEnv.Spec.CpuArchitecture for iso archName - if agentManifests.InfraEnv.Spec.CpuArchitecture != "" { - archName = agentManifests.InfraEnv.Spec.CpuArchitecture - } - - // If we have the image registry location and 'oc' command is available then get from release payload - ocRelease := i.getRelease(agentManifests, registriesConf) - logrus.Info("Extracting base ISO from release payload") - - if err := workflowreport.GetReport(ctx).SubStage(workflow.StageFetchBaseISOExtract); err != nil { - return "", err - } - baseIsoFileName, err := ocRelease.GetBaseIso(archName) - if err == nil { - if err := workflowreport.GetReport(ctx).SubStage(workflow.StageFetchBaseISOVerify); err != nil { - return "", err - } - i.checkReleasePayloadBaseISOVersion(ctx, ocRelease, archName) - - logrus.Debugf("Extracted base ISO image %s from release payload", baseIsoFileName) - i.File = &asset.File{Filename: baseIsoFileName} - return baseIsoFileName, nil - } - - if errors.Is(err, fs.ErrNotExist) { - // if image extract failed to extract the iso that architecture may be missing from release image - return "", fmt.Errorf("base ISO for %s not found in release image, check release image architecture", archName) - } - if !errors.Is(err, &exec.Error{}) { // Already warned about missing oc binary - logrus.Warning("Failed to extract base ISO from release payload - check registry 
configuration") - } - } - - logrus.Info("Downloading base ISO") - if err := workflowreport.GetReport(ctx).SubStage(workflow.StageFetchBaseISODownload); err != nil { - return "", err - } - return i.downloadIso(ctx, archName) -} - // Files returns the files generated by the asset. func (i *BaseIso) Files() []*asset.File { diff --git a/pkg/asset/agent/image/ignition.go b/pkg/asset/agent/image/ignition.go index e44901cc4c4..f3809230038 100644 --- a/pkg/asset/agent/image/ignition.go +++ b/pkg/asset/agent/image/ignition.go @@ -14,7 +14,6 @@ import ( "github.com/coreos/ignition/v2/config/util" igntypes "github.com/coreos/ignition/v2/config/v3_2/types" "github.com/coreos/stream-metadata-go/arch" - "github.com/coreos/stream-metadata-go/stream" "github.com/pkg/errors" "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" @@ -35,6 +34,7 @@ import ( "github.com/openshift/installer/pkg/asset/ignition" "github.com/openshift/installer/pkg/asset/ignition/bootstrap" "github.com/openshift/installer/pkg/asset/password" + "github.com/openshift/installer/pkg/asset/rhcos" "github.com/openshift/installer/pkg/asset/tls" "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/agent" @@ -127,6 +127,7 @@ func (a *Ignition) Generate(ctx context.Context, dependencies asset.Parents) err authConfig := &gencrypto.AuthConfig{} infraEnvAsset := &common.InfraEnvID{} dependencies.Get(agentManifests, agentConfigAsset, agentHostsAsset, extraManifests, fencingCredentials, authConfig, agentWorkflow, infraEnvAsset) + clusterInfo := &joiner.ClusterInfo{} if err := workflowreport.GetReport(ctx).Stage(workflow.StageIgnition); err != nil { return err @@ -163,7 +164,6 @@ func (a *Ignition) Generate(ctx context.Context, dependencies asset.Parents) err enabledServices := getDefaultEnabledServices() openshiftVersion := "" var err error - var streamGetter CoreOSBuildFetcher switch agentWorkflow.Workflow { case workflow.AgentWorkflowTypeInstall: @@ -191,10 +191,8 @@ func (a *Ignition) 
Generate(ctx context.Context, dependencies asset.Parents) err if err != nil { return err } - streamGetter = DefaultCoreOSStreamGetter case workflow.AgentWorkflowTypeAddNodes: - clusterInfo := &joiner.ClusterInfo{} addNodesConfig := &joiner.AddNodesConfig{} importClusterConfig := &joiner.ImportClusterConfig{} dependencies.Get(clusterInfo, addNodesConfig, importClusterConfig) @@ -221,9 +219,6 @@ func (a *Ignition) Generate(ctx context.Context, dependencies asset.Parents) err // Version matches the source cluster one openshiftVersion = clusterInfo.Version - streamGetter = func(ctx context.Context) (*stream.Stream, error) { - return clusterInfo.OSImage, nil - } // If defined, add the ignition endpoints if err := addDay2ClusterConfigFiles(&config, *clusterInfo, *importClusterConfig); err != nil { return err @@ -272,7 +267,7 @@ func (a *Ignition) Generate(ctx context.Context, dependencies asset.Parents) err infraEnvID := infraEnvAsset.ID logrus.Debug("Generated random infra-env id ", infraEnvID) - osImage, err := getOSImagesInfo(archName, openshiftVersion, streamGetter) + osImage, err := getOSImagesInfo(ctx, archName, openshiftVersion, customStreamGetter(agentWorkflow, clusterInfo)) if err != nil { return err } @@ -731,26 +726,17 @@ func addExtraManifests(config *igntypes.Config, extraManifests *manifests.ExtraM return nil } -func getOSImagesInfo(cpuArch string, openshiftVersion string, streamGetter CoreOSBuildFetcher) (*models.OsImage, error) { - st, err := streamGetter(context.Background()) - if err != nil { - return nil, err - } - +func getOSImagesInfo(ctx context.Context, cpuArch string, openshiftVersion string, streamGetter rhcos.CoreOSBuildFetcher) (*models.OsImage, error) { osImage := &models.OsImage{ CPUArchitecture: &cpuArch, } osImage.OpenshiftVersion = &openshiftVersion - streamArch, err := st.GetArchitecture(cpuArch) + artifacts, err := rhcos.GetMetalArtifact(ctx, cpuArch, streamGetter) if err != nil { return nil, err } - artifacts, ok := 
streamArch.Artifacts["metal"] - if !ok { - return nil, fmt.Errorf("failed to retrieve coreos metal info for architecture %s", cpuArch) - } osImage.Version = &artifacts.Release isoFormat, ok := artifacts.Formats["iso"] diff --git a/pkg/asset/agent/image/oc_test.go b/pkg/asset/agent/image/oc_test.go index c7d56e59cca..dfe54bd0059 100644 --- a/pkg/asset/agent/image/oc_test.go +++ b/pkg/asset/agent/image/oc_test.go @@ -40,7 +40,9 @@ func TestGetIcspContents(t *testing.T) { } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - contents, err := getIcspContents(tc.mirrorConfig) + contents, err := (&mirror.RegistriesConf{ + MirrorConfig: tc.mirrorConfig, + }).GetICSPContents() if tc.expectedError != "" { assert.Equal(t, tc.expectedError, err.Error()) } else { diff --git a/pkg/asset/agent/image/unconfigured_ignition.go b/pkg/asset/agent/image/unconfigured_ignition.go index 2e3f8dcb143..ed546c15412 100644 --- a/pkg/asset/agent/image/unconfigured_ignition.go +++ b/pkg/asset/agent/image/unconfigured_ignition.go @@ -90,7 +90,7 @@ func (a *UnconfiguredIgnition) Dependencies() []asset.Asset { // Generate generates the agent installer unconfigured ignition. // The appliance embeds both registries.conf and CA certificates in the image's // system ignition for the bootstrap phase. After first reboot, MCO manages these. 
-func (a *UnconfiguredIgnition) Generate(_ context.Context, dependencies asset.Parents) error { +func (a *UnconfiguredIgnition) Generate(ctx context.Context, dependencies asset.Parents) error { agentWorkflow := &workflow.AgentWorkflow{} infraEnvAsset := &manifests.InfraEnvFile{} infraEnvIDAsset := &common.InfraEnvID{} @@ -143,7 +143,7 @@ func (a *UnconfiguredIgnition) Generate(_ context.Context, dependencies asset.Pa if err != nil { return err } - osImage, err := getOSImagesInfo(archName, openshiftVersion, DefaultCoreOSStreamGetter) + osImage, err := getOSImagesInfo(ctx, archName, openshiftVersion, nil) if err != nil { return err } diff --git a/pkg/asset/agent/mirror/registriesconf.go b/pkg/asset/agent/mirror/registriesconf.go index aa5e3eb971e..1c3950e2b62 100644 --- a/pkg/asset/agent/mirror/registriesconf.go +++ b/pkg/asset/agent/mirror/registriesconf.go @@ -2,15 +2,19 @@ package mirror import ( "context" + "encoding/json" "fmt" "os" "path/filepath" "regexp" "github.com/containers/image/v5/pkg/sysregistriesv2" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" "github.com/pelletier/go-toml" "github.com/pkg/errors" "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" configv1 "github.com/openshift/api/config/v1" "github.com/openshift/installer/pkg/asset" @@ -229,6 +233,42 @@ func (i *RegistriesConf) generateRegistriesConf(imageDigestSources []types.Image return nil } +// HasMirrors returns whether there are any mirrors configured. +func (i *RegistriesConf) HasMirrors() bool { + return len(i.MirrorConfig) > 0 +} + +// GetICSPContents converts the data in registries.conf into ICSP format. 
+func (i *RegistriesConf) GetICSPContents() ([]byte, error) { + icsp := operatorv1alpha1.ImageContentSourcePolicy{ + TypeMeta: metav1.TypeMeta{ + APIVersion: operatorv1alpha1.SchemeGroupVersion.String(), + Kind: "ImageContentSourcePolicy", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "image-policy", + // not namespaced + }, + } + + icsp.Spec.RepositoryDigestMirrors = make([]operatorv1alpha1.RepositoryDigestMirrors, len(i.MirrorConfig)) + for i, mirrorRegistries := range i.MirrorConfig { + icsp.Spec.RepositoryDigestMirrors[i] = operatorv1alpha1.RepositoryDigestMirrors{Source: mirrorRegistries.Location, Mirrors: mirrorRegistries.Mirrors} + } + + // Convert to json first so json tags are handled + jsonData, err := json.Marshal(&icsp) + if err != nil { + return nil, err + } + contents, err := yaml.JSONToYAML(jsonData) + if err != nil { + return nil, err + } + + return contents, nil +} + // Files returns the files generated by the asset. func (i *RegistriesConf) Files() []*asset.File { if i.File != nil { diff --git a/pkg/asset/rhcos/iso.go b/pkg/asset/rhcos/iso.go new file mode 100644 index 00000000000..d50835acc91 --- /dev/null +++ b/pkg/asset/rhcos/iso.go @@ -0,0 +1,177 @@ +package rhcos + +import ( + "context" + "errors" + "fmt" + "io/fs" + "os" + "os/exec" + "time" + + "github.com/coreos/stream-metadata-go/arch" + "github.com/coreos/stream-metadata-go/stream" + "github.com/sirupsen/logrus" + + "github.com/openshift/installer/pkg/asset/agent/workflow" + workflowreport "github.com/openshift/installer/pkg/asset/agent/workflow/report" + "github.com/openshift/installer/pkg/rhcos" + "github.com/openshift/installer/pkg/rhcos/cache" + "github.com/openshift/installer/pkg/types" +) + +// BaseIso generates the base ISO file for the image. +type BaseIso struct { + streamGetter CoreOSBuildFetcher + ocRelease ReleasePayload +} + +// CoreOSBuildFetcher will be to used to switch the source of the coreos metadata. 
+type CoreOSBuildFetcher func(ctx context.Context) (*stream.Stream, error) + +// defaultCoreOSStreamGetter uses the pinned metadata. +var defaultCoreOSStreamGetter = rhcos.FetchCoreOSBuild + +// NewBaseISOFetcher returns a struct that can be used to fetch a base ISO using +// the default method. +func NewBaseISOFetcher(ocRelease ReleasePayload, streamGetter CoreOSBuildFetcher) *BaseIso { + if streamGetter == nil { + streamGetter = defaultCoreOSStreamGetter + } + return &BaseIso{ + streamGetter: streamGetter, + ocRelease: ocRelease, + } +} + +// GetBaseISOFilename retrieves the base ISO for the given architecture +// (possibly from the cache) and returns its location on disk. +func (i *BaseIso) GetBaseISOFilename(ctx context.Context, arch string) (baseIsoFileName string, err error) { + err = workflowreport.GetReport(ctx).Stage(workflow.StageFetchBaseISO) + if err != nil { + return + } + + if urlOverride, ok := os.LookupEnv("OPENSHIFT_INSTALL_OS_IMAGE_OVERRIDE"); ok && urlOverride != "" { + logrus.Warn("Found override for OS Image. Please be warned, this is not advised") + baseIsoFileName, err = cache.DownloadImageFile(urlOverride, cache.AgentApplicationName) + } else { + baseIsoFileName, err = i.retrieveBaseIso(ctx, arch) + } + + return +} + +// GetMetalArtifact returns the CoreOS artifacts for metal for a given arch +// from a given stream. 
+func GetMetalArtifact(ctx context.Context, archName string, streamGetter CoreOSBuildFetcher) (stream.PlatformArtifacts, error) { + if streamGetter == nil { + streamGetter = defaultCoreOSStreamGetter + } + + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + // Get the ISO to use from rhcos.json + st, err := streamGetter(ctx) + if err != nil { + return stream.PlatformArtifacts{}, err + } + + streamArch, err := st.GetArchitecture(archName) + if err != nil { + return stream.PlatformArtifacts{}, err + } + + metal, ok := streamArch.Artifacts["metal"] + if !ok { + return stream.PlatformArtifacts{}, fmt.Errorf("coreOs stream data not found for 'metal' artifact") + } + + return metal, nil +} + +// Download the ISO using the URL in rhcos.json. +func (i *BaseIso) downloadIso(ctx context.Context, archName string) (string, error) { + metal, err := GetMetalArtifact(ctx, archName, i.streamGetter) + if err != nil { + return "", err + } + + format, ok := metal.Formats["iso"] + if !ok { + return "", fmt.Errorf("no ISO found to download for %s", archName) + } + + url := format.Disk.Location + sha := format.Disk.Sha256 + cachedImage, err := cache.DownloadImageFileWithSha(url, cache.AgentApplicationName, sha) + if err != nil { + return "", fmt.Errorf("failed to download base ISO image %s: %w", url, err) + } + + return cachedImage, nil +} + +func (i *BaseIso) checkReleasePayloadBaseISOVersion(ctx context.Context, r ReleasePayload, archName string) { + logrus.Debugf("Checking release payload base ISO version") + + // Get current release payload CoreOS version + payloadRelease, err := r.GetBaseIsoVersion(archName) + if err != nil { + logrus.Warnf("unable to determine base ISO version: %s", err.Error()) + return + } + + // Get pinned version from installer + metal, err := GetMetalArtifact(ctx, archName, i.streamGetter) + if err != nil { + logrus.Warnf("unable to determine base ISO version: %s", err.Error()) + return + } + + // Check for a mismatch + if 
metal.Release != payloadRelease { + logrus.Warnf("base ISO version mismatch in release payload. Expected version %s but found %s", metal.Release, payloadRelease) + } +} + +func (i *BaseIso) retrieveBaseIso(ctx context.Context, archName string) (string, error) { + // Default iso archName to x86_64. + if archName == "" { + archName = arch.RpmArch(types.ArchitectureAMD64) + } + + if i.ocRelease != nil { + // If we have the image registry location and 'oc' command is available then get from release payload + logrus.Info("Extracting base ISO from release payload") + + if err := workflowreport.GetReport(ctx).SubStage(workflow.StageFetchBaseISOExtract); err != nil { + return "", err + } + baseIsoFileName, err := i.ocRelease.GetBaseIso(archName, i.streamGetter) + if err == nil { + if err := workflowreport.GetReport(ctx).SubStage(workflow.StageFetchBaseISOVerify); err != nil { + return "", err + } + i.checkReleasePayloadBaseISOVersion(ctx, i.ocRelease, archName) + + logrus.Debugf("Extracted base ISO image %s from release payload", baseIsoFileName) + return baseIsoFileName, nil + } + + if errors.Is(err, fs.ErrNotExist) { + // if image extract failed to extract the iso that architecture may be missing from release image + return "", fmt.Errorf("base ISO for %s not found in release image, check release image architecture", archName) + } + if !errors.Is(err, &exec.Error{}) { // Already warned about missing oc binary + logrus.Warning("Failed to extract base ISO from release payload - check registry configuration") + } + } + + logrus.Info("Downloading base ISO") + if err := workflowreport.GetReport(ctx).SubStage(workflow.StageFetchBaseISODownload); err != nil { + return "", err + } + return i.downloadIso(ctx, archName) +} diff --git a/pkg/asset/agent/image/baseiso_test.go b/pkg/asset/rhcos/iso_test.go similarity index 62% rename from pkg/asset/agent/image/baseiso_test.go rename to pkg/asset/rhcos/iso_test.go index 2b2892b3f85..60bbbb20fdd 100644 --- 
a/pkg/asset/agent/image/baseiso_test.go +++ b/pkg/asset/rhcos/iso_test.go @@ -1,4 +1,4 @@ -package image +package rhcos import ( "context" @@ -12,24 +12,14 @@ import ( "github.com/coreos/stream-metadata-go/stream" "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - - "github.com/openshift/assisted-service/api/v1beta1" - v1 "github.com/openshift/hive/apis/hive/v1" - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/agent/joiner" - "github.com/openshift/installer/pkg/asset/agent/manifests" - "github.com/openshift/installer/pkg/asset/agent/mirror" - "github.com/openshift/installer/pkg/asset/agent/workflow" ) -func TestBaseIso_Generate(t *testing.T) { +func TestBaseIso(t *testing.T) { ocReleaseImage := "416.94.202402130130-0" ocBaseIsoFilename := "openshift-4.16" cases := []struct { name string - dependencies []asset.Asset envVarOsImageOverrideValue string getIsoError error expectedBaseIsoFilename string @@ -41,56 +31,17 @@ func TestBaseIso_Generate(t *testing.T) { expectedBaseIsoFilename: "openshift-4.15", }, { - name: "default", - dependencies: []asset.Asset{ - &workflow.AgentWorkflow{Workflow: workflow.AgentWorkflowTypeInstall}, - &joiner.ClusterInfo{}, - &manifests.AgentManifests{ - InfraEnv: &v1beta1.InfraEnv{}, - ClusterImageSet: &v1.ClusterImageSet{ - Spec: v1.ClusterImageSetSpec{ - ReleaseImage: ocReleaseImage, - }, - }, - PullSecret: &corev1.Secret{ - StringData: map[string]string{ - ".dockerconfigjson": "supersecret", - }, - }, - }, - &mirror.RegistriesConf{}, - }, + name: "default", expectedBaseIsoFilename: ocBaseIsoFilename, }, { - name: "direct download if oc is not available", - dependencies: []asset.Asset{ - &workflow.AgentWorkflow{Workflow: workflow.AgentWorkflowTypeInstall}, - &joiner.ClusterInfo{}, - &manifests.AgentManifests{ - InfraEnv: &v1beta1.InfraEnv{}, - ClusterImageSet: &v1.ClusterImageSet{ - Spec: v1.ClusterImageSetSpec{ - ReleaseImage: ocReleaseImage, - }, - }, - PullSecret: 
&corev1.Secret{ - StringData: map[string]string{ - ".dockerconfigjson": "supersecret", - }, - }, - }, - &mirror.RegistriesConf{}, - }, + name: "direct download if oc is not available", getIsoError: &exec.Error{}, expectedBaseIsoFilename: ocReleaseImage, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - dependencies := asset.Parents{} - dependencies.Add(tc.dependencies...) - // Setup a fake http server, to serve the future download request. svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Answer with a fixed size randomly filled buffer @@ -120,13 +71,13 @@ func TestBaseIso_Generate(t *testing.T) { assert.NoError(t, err) }() - baseIso := &BaseIso{ - ocRelease: &mockRelease{ + fetcher := NewBaseISOFetcher( + &mockRelease{ isoBaseVersion: ocReleaseImage, baseIsoFileName: ocBaseIsoFilename, baseIsoError: tc.getIsoError, }, - streamGetter: func(ctx context.Context) (*stream.Stream, error) { + func(ctx context.Context) (*stream.Stream, error) { return &stream.Stream{ Architectures: map[string]stream.Arch{ "x86_64": { @@ -145,13 +96,12 @@ func TestBaseIso_Generate(t *testing.T) { }, }, }, nil - }, - } - err = baseIso.Generate(context.Background(), dependencies) + }) + filename, err := fetcher.GetBaseISOFilename(context.Background(), "") if tc.expectedError == "" { assert.NoError(t, err) - assert.Regexp(t, tc.expectedBaseIsoFilename, baseIso.File.Filename) + assert.Regexp(t, tc.expectedBaseIsoFilename, filename) } else { assert.Equal(t, tc.expectedError, err.Error()) } @@ -165,7 +115,7 @@ type mockRelease struct { baseIsoError error } -func (m *mockRelease) GetBaseIso(architecture string) (string, error) { +func (m *mockRelease) GetBaseIso(architecture string, streamGetter CoreOSBuildFetcher) (string, error) { if m.baseIsoError != nil { return "", m.baseIsoError } diff --git a/pkg/asset/agent/image/releaseextract.go b/pkg/asset/rhcos/releaseextract.go similarity index 69% rename from 
pkg/asset/agent/image/releaseextract.go rename to pkg/asset/rhcos/releaseextract.go index 772af69b386..06dcbfd6d8b 100644 --- a/pkg/asset/agent/image/releaseextract.go +++ b/pkg/asset/rhcos/releaseextract.go @@ -1,4 +1,4 @@ -package image +package rhcos import ( "bytes" @@ -20,12 +20,8 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/thedevsaddam/retry" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/yaml" - operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" "github.com/openshift/installer/pkg/asset/agent" - "github.com/openshift/installer/pkg/asset/agent/mirror" "github.com/openshift/installer/pkg/rhcos/cache" ) @@ -34,46 +30,55 @@ const ( coreOsFileName = "/coreos/coreos-%s.iso" coreOsSha256FileName = "/coreos/coreos-%s.iso.sha256" coreOsStreamFileName = "/coreos/coreos-stream.json" - // OcDefaultTries is the number of times to execute the oc command on failures. - OcDefaultTries = 5 - // OcDefaultRetryDelay is the time between retries. - OcDefaultRetryDelay = time.Second * 5 + // ocDefaultTries is the number of times to execute the oc command on failures. + ocDefaultTries = 5 + // ocDefaultRetryDelay is the time between retries. + ocDefaultRetryDelay = time.Second * 5 ) -// Config is used to set up the retries for extracting the base ISO. -type Config struct { +// ExtractConfig is used to set up the retries for extracting the base ISO. +type ExtractConfig struct { MaxTries uint RetryDelay time.Duration } -// Release is the interface to use the oc command to the get image info. -type Release interface { - GetBaseIso(architecture string) (string, error) +// ReleasePayload is the interface to use the oc command to the get image info. 
+type ReleasePayload interface { + GetBaseIso(architecture string, streamGetter CoreOSBuildFetcher) (string, error) GetBaseIsoVersion(architecture string) (string, error) ExtractFile(image string, filename string, architecture string) ([]string, error) } -type release struct { - config Config +type MirrorConfig interface { + HasMirrors() bool + GetICSPContents() ([]byte, error) +} + +type releasePayload struct { + config ExtractConfig releaseImage string pullSecret string - mirrorConfig []mirror.RegistriesConfig - streamGetter CoreOSBuildFetcher + mirrorConfig MirrorConfig } -// NewRelease is used to set up the executor to run oc commands. -func NewRelease(config Config, releaseImage string, pullSecret string, mirrorConfig []mirror.RegistriesConfig, streamGetter CoreOSBuildFetcher) Release { - return &release{ +// NewReleasePayload is used to set up the executor to run oc commands. +func NewReleasePayload(config ExtractConfig, releaseImage string, pullSecret string, mirrorConfig MirrorConfig) ReleasePayload { + if config.MaxTries == 0 { + config.MaxTries = ocDefaultTries + } + if config.RetryDelay == 0 { + config.RetryDelay = ocDefaultRetryDelay + } + return &releasePayload{ config: config, releaseImage: releaseImage, pullSecret: pullSecret, mirrorConfig: mirrorConfig, - streamGetter: streamGetter, } } // ExtractFile extracts the specified file from the given image name, and store it in the cache dir. -func (r *release) ExtractFile(image string, filename string, architecture string) ([]string, error) { +func (r *releasePayload) ExtractFile(image string, filename string, architecture string) ([]string, error) { imagePullSpec, err := r.getImageFromRelease(image, architecture) if err != nil { return nil, err @@ -92,7 +97,7 @@ func (r *release) ExtractFile(image string, filename string, architecture string } // Get the CoreOS ISO from the releaseImage. 
-func (r *release) GetBaseIso(architecture string) (string, error) { +func (r *releasePayload) GetBaseIso(architecture string, streamGetter CoreOSBuildFetcher) (string, error) { // Get the machine-os-images pullspec from the release and use that to get the CoreOS ISO image, err := r.getImageFromRelease(machineOsImageName, architecture) if err != nil { @@ -112,7 +117,7 @@ func (r *release) GetBaseIso(architecture string) (string, error) { } if cachedFile != "" { logrus.Info("Verifying cached file") - valid, err := r.verifyCacheFile(image, cachedFile, architecture) + valid, err := r.verifyCacheFile(image, cachedFile, architecture, streamGetter) if err != nil { return "", err } @@ -131,7 +136,7 @@ func (r *release) GetBaseIso(architecture string) (string, error) { return path[0], err } -func (r *release) GetBaseIsoVersion(architecture string) (string, error) { +func (r *releasePayload) GetBaseIsoVersion(architecture string) (string, error) { files, err := r.ExtractFile(machineOsImageName, coreOsStreamFileName, architecture) if err != nil { return "", err @@ -163,11 +168,11 @@ func (r *release) GetBaseIsoVersion(architecture string) (string, error) { return "", errors.New("unable to determine CoreOS release version") } -func (r *release) getImageFromRelease(imageName string, architecture string) (string, error) { +func (r *releasePayload) getImageFromRelease(imageName string, architecture string) (string, error) { // This requires the 'oc' command so make sure its available _, err := exec.LookPath("oc") if err != nil { - if len(r.mirrorConfig) > 0 { + if r.mirrorConfig.HasMirrors() { logrus.Warning("Unable to validate mirror config because \"oc\" command is not available") } else { logrus.Debug("Skipping ISO extraction; \"oc\" command is not available") @@ -189,15 +194,16 @@ func (r *release) getImageFromRelease(imageName string, architecture string) (st filterbyos, insecure, } - if len(r.mirrorConfig) > 0 { + if r.mirrorConfig.HasMirrors() { logrus.Debugf("Using 
mirror configuration") - icspFile, err := getIcspFileFromRegistriesConfig(r.mirrorConfig) + mirrorArg, cleanup, err := getMirrorArg(r.mirrorConfig) if err != nil { return "", err } - defer removeIcspFile(icspFile) - icspfile := "--icsp-file=" + icspFile - cmd = append(cmd, icspfile) + if mirrorArg != "" { + defer cleanup() + cmd = append(cmd, mirrorArg) + } } cmd = append(cmd, r.releaseImage) logrus.Debugf("Fetching image from OCP release (%s)", cmd) @@ -212,7 +218,7 @@ func (r *release) getImageFromRelease(imageName string, architecture string) (st return image, nil } -func (r *release) extractFileFromImage(image, file, cacheDir string, architecture string) ([]string, error) { +func (r *releasePayload) extractFileFromImage(image, file, cacheDir string, architecture string) ([]string, error) { archName := arch.GoArch(architecture) extractpath := "--path=" + file + ":" + cacheDir filterbyos := "--filter-by-os=linux/" + archName @@ -228,14 +234,15 @@ func (r *release) extractFileFromImage(image, file, cacheDir string, architectur "--confirm", } - if len(r.mirrorConfig) > 0 { - icspFile, err := getIcspFileFromRegistriesConfig(r.mirrorConfig) + if r.mirrorConfig.HasMirrors() { + mirrorArg, cleanup, err := getMirrorArg(r.mirrorConfig) if err != nil { return nil, err } - defer removeIcspFile(icspFile) - icspfile := "--icsp-file=" + icspFile - cmd = append(cmd, icspfile) + if mirrorArg != "" { + defer cleanup() + cmd = append(cmd, mirrorArg) + } } path := filepath.Join(cacheDir, path.Base(file)) // Remove file if it exists @@ -262,12 +269,12 @@ func (r *release) extractFileFromImage(image, file, cacheDir string, architectur } // Get hash from rhcos.json. 
-func (r *release) getHashFromInstaller(architecture string) (bool, string) { +func (r *releasePayload) getHashFromInstaller(architecture string, streamGetter CoreOSBuildFetcher) (bool, string) { // Get hash from metadata in the installer ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) defer cancel() - st, err := r.streamGetter(ctx) + st, err := streamGetter(ctx) if err != nil { return false, "" } @@ -295,7 +302,7 @@ func matchingHash(imageSha []byte, sha string) bool { } // Check if there is a different base ISO in the release payload. -func (r *release) verifyCacheFile(image, file, architecture string) (bool, error) { +func (r *releasePayload) verifyCacheFile(image, file, architecture string, streamGetter CoreOSBuildFetcher) (bool, error) { // Get hash of cached file f, err := os.Open(file) if err != nil { @@ -310,7 +317,7 @@ func (r *release) verifyCacheFile(image, file, architecture string) (bool, error fileSha := h.Sum(nil) // Check if the hash of cached file matches hash in rhcos.json - found, rhcosSha := r.getHashFromInstaller(architecture) + found, rhcosSha := r.getHashFromInstaller(architecture, streamGetter) if found && matchingHash(fileSha, rhcosSha) { logrus.Debug("Found matching hash in installer metadata") return true, nil @@ -361,64 +368,32 @@ func removeCacheFile(path string) error { } // Create a temporary file containing the ImageContentPolicySources. 
-func getIcspFileFromRegistriesConfig(mirrorConfig []mirror.RegistriesConfig) (string, error) { - contents, err := getIcspContents(mirrorConfig) - if err != nil { - return "", err - } - if contents == nil { +func getMirrorArg(mirrorConfig MirrorConfig) (string, func(), error) { + if !mirrorConfig.HasMirrors() { logrus.Debugf("No registry entries to build ICSP file") - return "", nil + return "", nil, nil + } + + contents, err := mirrorConfig.GetICSPContents() + if err != nil { + return "", nil, err } icspFile, err := os.CreateTemp("", "icsp-file") if err != nil { - return "", err + return "", nil, err } if _, err := icspFile.Write(contents); err != nil { icspFile.Close() os.Remove(icspFile.Name()) - return "", err + return "", nil, err } icspFile.Close() - return icspFile.Name(), nil -} - -// Convert the data in registries.conf into ICSP format. -func getIcspContents(mirrorConfig []mirror.RegistriesConfig) ([]byte, error) { - icsp := operatorv1alpha1.ImageContentSourcePolicy{ - TypeMeta: metav1.TypeMeta{ - APIVersion: operatorv1alpha1.SchemeGroupVersion.String(), - Kind: "ImageContentSourcePolicy", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "image-policy", - // not namespaced - }, - } - - icsp.Spec.RepositoryDigestMirrors = make([]operatorv1alpha1.RepositoryDigestMirrors, len(mirrorConfig)) - for i, mirrorRegistries := range mirrorConfig { - icsp.Spec.RepositoryDigestMirrors[i] = operatorv1alpha1.RepositoryDigestMirrors{Source: mirrorRegistries.Location, Mirrors: mirrorRegistries.Mirrors} - } - - // Convert to json first so json tags are handled - jsonData, err := json.Marshal(&icsp) - if err != nil { - return nil, err - } - contents, err := yaml.JSONToYAML(jsonData) - if err != nil { - return nil, err + remove := func() { + os.Remove(icspFile.Name()) } - return contents, nil -} - -func removeIcspFile(filename string) { - if filename != "" { - os.Remove(filename) - } + return "--icsp-file=" + icspFile.Name(), remove, nil } From 
ff37207cf33ad832ad71217dff43d2bda8142c8f Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Mon, 31 Mar 2025 10:31:06 +1300 Subject: [PATCH 02/14] Move MirrorConfig definition to types package --- pkg/asset/agent/image/agentartifacts.go | 5 ++- pkg/asset/agent/image/baseiso.go | 7 +-- pkg/asset/agent/image/ignition_test.go | 15 ++++--- pkg/asset/agent/image/oc_test.go | 12 +++--- pkg/asset/agent/mirror/registriesconf.go | 52 ++-------------------- pkg/asset/rhcos/releaseextract.go | 12 ++---- pkg/types/mirror.go | 55 ++++++++++++++++++++++++ pkg/types/zz_generated.deepcopy.go | 43 ++++++++++++++++++ 8 files changed, 125 insertions(+), 76 deletions(-) create mode 100644 pkg/types/mirror.go diff --git a/pkg/asset/agent/image/agentartifacts.go b/pkg/asset/agent/image/agentartifacts.go index a96dc80f704..5b498773752 100644 --- a/pkg/asset/agent/image/agentartifacts.go +++ b/pkg/asset/agent/image/agentartifacts.go @@ -21,6 +21,7 @@ import ( "github.com/openshift/installer/pkg/asset/agent/workflow" workflowreport "github.com/openshift/installer/pkg/asset/agent/workflow/report" "github.com/openshift/installer/pkg/asset/rhcos" + "github.com/openshift/installer/pkg/types" ) const ( @@ -116,7 +117,7 @@ func (a *AgentArtifacts) Generate(ctx context.Context, dependencies asset.Parent if err := workflowreport.GetReport(ctx).SubStage(workflow.StageAgentArtifactsAgentTUI); err != nil { return err } - agentTuiFiles, err = a.fetchAgentTuiFiles(agentManifests.ClusterImageSet.Spec.ReleaseImage, agentManifests.GetPullSecretData(), registriesConf) + agentTuiFiles, err = a.fetchAgentTuiFiles(agentManifests.ClusterImageSet.Spec.ReleaseImage, agentManifests.GetPullSecretData(), registriesConf.MirrorConfig) if err != nil { return err } @@ -133,7 +134,7 @@ func (a *AgentArtifacts) Generate(ctx context.Context, dependencies asset.Parent return nil } -func (a *AgentArtifacts) fetchAgentTuiFiles(releaseImage string, pullSecret string, mirrorConfig rhcos.MirrorConfig) ([]string, error) { +func (a 
*AgentArtifacts) fetchAgentTuiFiles(releaseImage string, pullSecret string, mirrorConfig types.MirrorConfig) ([]string, error) { release := rhcos.NewReleasePayload( rhcos.ExtractConfig{}, releaseImage, pullSecret, mirrorConfig) diff --git a/pkg/asset/agent/image/baseiso.go b/pkg/asset/agent/image/baseiso.go index adfdb426173..16fd508a3d2 100644 --- a/pkg/asset/agent/image/baseiso.go +++ b/pkg/asset/agent/image/baseiso.go @@ -16,6 +16,7 @@ import ( "github.com/openshift/installer/pkg/asset/agent/mirror" "github.com/openshift/installer/pkg/asset/agent/workflow" "github.com/openshift/installer/pkg/asset/rhcos" + "github.com/openshift/installer/pkg/types" ) // BaseIso generates the base ISO file for the image @@ -71,7 +72,7 @@ func (i *BaseIso) Generate(ctx context.Context, dependencies asset.Parents) erro dependencies.Get(agentManifests, registriesConf, agentWorkflow, clusterInfo) baseIsoFileName, err := rhcos.NewBaseISOFetcher( - i.getRelease(agentManifests, registriesConf), + i.getRelease(agentManifests, registriesConf.MirrorConfig), customStreamGetter(agentWorkflow, clusterInfo)).GetBaseISOFilename(ctx, agentManifests.InfraEnv.Spec.CpuArchitecture) if err == nil { @@ -93,7 +94,7 @@ func customStreamGetter(agentWorkflow *workflow.AgentWorkflow, clusterInfo *join return nil } -func (i *BaseIso) getRelease(agentManifests *manifests.AgentManifests, registriesConf *mirror.RegistriesConf) rhcos.ReleasePayload { +func (i *BaseIso) getRelease(agentManifests *manifests.AgentManifests, mirrorConfig types.MirrorConfig) rhcos.ReleasePayload { if i.ocRelease != nil { return i.ocRelease } @@ -107,7 +108,7 @@ func (i *BaseIso) getRelease(agentManifests *manifests.AgentManifests, registrie i.ocRelease = rhcos.NewReleasePayload( rhcos.ExtractConfig{}, - releaseImage, pullSecret, registriesConf) + releaseImage, pullSecret, mirrorConfig) return i.ocRelease } diff --git a/pkg/asset/agent/image/ignition_test.go b/pkg/asset/agent/image/ignition_test.go index 9b5e010b673..8716d89a991 
100644 --- a/pkg/asset/agent/image/ignition_test.go +++ b/pkg/asset/agent/image/ignition_test.go @@ -32,6 +32,7 @@ import ( "github.com/openshift/installer/pkg/asset/agent/workflow" "github.com/openshift/installer/pkg/asset/password" "github.com/openshift/installer/pkg/asset/tls" + "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/agent" ) @@ -730,7 +731,7 @@ func TestIgnition_getMirrorFromRelease(t *testing.T) { Filename: "registries.conf", Data: []byte(""), }, - MirrorConfig: []mirror.RegistriesConfig{ + MirrorConfig: types.MirrorConfig{ { Location: "some.registry.org/release", Mirrors: []string{"some.mirror.org"}, @@ -747,7 +748,7 @@ func TestIgnition_getMirrorFromRelease(t *testing.T) { Filename: "registries.conf", Data: []byte(""), }, - MirrorConfig: []mirror.RegistriesConfig{ + MirrorConfig: types.MirrorConfig{ { Location: "registry.ci.openshift.org/ocp/release", Mirrors: []string{"virthost.ostest.test.metalkube.org:5000/localimages/local-release-image"}, @@ -764,7 +765,7 @@ func TestIgnition_getMirrorFromRelease(t *testing.T) { Filename: "registries.conf", Data: []byte(""), }, - MirrorConfig: []mirror.RegistriesConfig{ + MirrorConfig: types.MirrorConfig{ { Location: "quay.io/openshift-release-dev/ocp-v4.0-art-dev", Mirrors: []string{"localhost:5000/openshift4/openshift/release"}, @@ -785,7 +786,7 @@ func TestIgnition_getMirrorFromRelease(t *testing.T) { Filename: "registries.conf", Data: []byte(""), }, - MirrorConfig: []mirror.RegistriesConfig{ + MirrorConfig: types.MirrorConfig{ { Location: "quay.io/openshift-release-dev/ocp-v4.0-art-dev", Mirrors: []string{"virthost.ostest.test.metalkube.org:5000/localimages/ocp-release", @@ -830,7 +831,7 @@ func TestIgnition_getPublicContainerRegistries(t *testing.T) { Filename: "registries.conf", Data: []byte(""), }, - MirrorConfig: []mirror.RegistriesConfig{ + MirrorConfig: types.MirrorConfig{ { Location: "some.registry.org/release", Mirrors: []string{"some.mirror.org"}, @@ -846,7 
+847,7 @@ func TestIgnition_getPublicContainerRegistries(t *testing.T) { Filename: "registries.conf", Data: []byte(""), }, - MirrorConfig: []mirror.RegistriesConfig{ + MirrorConfig: types.MirrorConfig{ { Location: "quay.io/openshift-release-dev/ocp-v4.0-art-dev", Mirrors: []string{"localhost:5000/openshift4/openshift/release"}, @@ -866,7 +867,7 @@ func TestIgnition_getPublicContainerRegistries(t *testing.T) { Filename: "registries.conf", Data: []byte(""), }, - MirrorConfig: []mirror.RegistriesConfig{ + MirrorConfig: types.MirrorConfig{ { Location: "registry.ci.openshift.org/ocp-v4.0-art-dev", Mirrors: []string{"localhost:5000/openshift4/openshift/release"}, diff --git a/pkg/asset/agent/image/oc_test.go b/pkg/asset/agent/image/oc_test.go index dfe54bd0059..b9acd2f80b5 100644 --- a/pkg/asset/agent/image/oc_test.go +++ b/pkg/asset/agent/image/oc_test.go @@ -5,20 +5,20 @@ import ( "github.com/stretchr/testify/assert" - "github.com/openshift/installer/pkg/asset/agent/mirror" + "github.com/openshift/installer/pkg/types" ) func TestGetIcspContents(t *testing.T) { cases := []struct { name string - mirrorConfig []mirror.RegistriesConfig + mirrorConfig types.MirrorConfig expectedError string expectedConfig string }{ { name: "valid-config", - mirrorConfig: []mirror.RegistriesConfig{ + mirrorConfig: types.MirrorConfig{ { Location: "registry.ci.openshift.org/ocp/release", Mirrors: []string{"virthost.ostest.test.metalkube.org:5000/localimages/local-release-image"}, @@ -33,16 +33,14 @@ func TestGetIcspContents(t *testing.T) { }, { name: "empty-config", - mirrorConfig: []mirror.RegistriesConfig{}, + mirrorConfig: types.MirrorConfig{}, expectedConfig: "apiVersion: operator.openshift.io/v1alpha1\nkind: ImageContentSourcePolicy\nmetadata:\n name: image-policy\nspec:\n repositoryDigestMirrors: []\n", expectedError: "", }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - contents, err := (&mirror.RegistriesConf{ - MirrorConfig: tc.mirrorConfig, - }).GetICSPContents() 
+ contents, err := tc.mirrorConfig.GetICSPContents() if tc.expectedError != "" { assert.Equal(t, tc.expectedError, err.Error()) } else { diff --git a/pkg/asset/agent/mirror/registriesconf.go b/pkg/asset/agent/mirror/registriesconf.go index 1c3950e2b62..96edae4e1a9 100644 --- a/pkg/asset/agent/mirror/registriesconf.go +++ b/pkg/asset/agent/mirror/registriesconf.go @@ -2,19 +2,15 @@ package mirror import ( "context" - "encoding/json" "fmt" "os" "path/filepath" "regexp" "github.com/containers/image/v5/pkg/sysregistriesv2" - operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" "github.com/pelletier/go-toml" "github.com/pkg/errors" "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/yaml" configv1 "github.com/openshift/api/config/v1" "github.com/openshift/installer/pkg/asset" @@ -116,13 +112,7 @@ unqualified-search-registries = [] type RegistriesConf struct { File *asset.File Config *sysregistriesv2.V2RegistriesConf - MirrorConfig []RegistriesConfig -} - -// RegistriesConfig holds the data extracted from registries.conf -type RegistriesConfig struct { - Location string - Mirrors []string + MirrorConfig types.MirrorConfig } var _ asset.WritableAsset = (*RegistriesConf)(nil) @@ -233,42 +223,6 @@ func (i *RegistriesConf) generateRegistriesConf(imageDigestSources []types.Image return nil } -// HasMirrors returns whether there are any mirrors configured. -func (i *RegistriesConf) HasMirrors() bool { - return len(i.MirrorConfig) > 0 -} - -// GetICSPContents converts the data in registries.conf into ICSP format. 
-func (i *RegistriesConf) GetICSPContents() ([]byte, error) { - icsp := operatorv1alpha1.ImageContentSourcePolicy{ - TypeMeta: metav1.TypeMeta{ - APIVersion: operatorv1alpha1.SchemeGroupVersion.String(), - Kind: "ImageContentSourcePolicy", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "image-policy", - // not namespaced - }, - } - - icsp.Spec.RepositoryDigestMirrors = make([]operatorv1alpha1.RepositoryDigestMirrors, len(i.MirrorConfig)) - for i, mirrorRegistries := range i.MirrorConfig { - icsp.Spec.RepositoryDigestMirrors[i] = operatorv1alpha1.RepositoryDigestMirrors{Source: mirrorRegistries.Location, Mirrors: mirrorRegistries.Mirrors} - } - - // Convert to json first so json tags are handled - jsonData, err := json.Marshal(&icsp) - if err != nil { - return nil, err - } - contents, err := yaml.JSONToYAML(jsonData) - if err != nil { - return nil, err - } - - return contents, nil -} - // Files returns the files generated by the asset. func (i *RegistriesConf) Files() []*asset.File { if i.File != nil { @@ -341,9 +295,9 @@ func (i *RegistriesConf) generateDefaultRegistriesConf() error { } func (i *RegistriesConf) setMirrorConfig(registriesConf *sysregistriesv2.V2RegistriesConf) { - mirrorConfig := make([]RegistriesConfig, len(registriesConf.Registries)) + mirrorConfig := make(types.MirrorConfig, len(registriesConf.Registries)) for i, reg := range registriesConf.Registries { - mirrorConfig[i] = RegistriesConfig{ + mirrorConfig[i] = types.Mirror{ Location: reg.Location, } for _, mirror := range reg.Mirrors { diff --git a/pkg/asset/rhcos/releaseextract.go b/pkg/asset/rhcos/releaseextract.go index 06dcbfd6d8b..5cf80760f58 100644 --- a/pkg/asset/rhcos/releaseextract.go +++ b/pkg/asset/rhcos/releaseextract.go @@ -23,6 +23,7 @@ import ( "github.com/openshift/installer/pkg/asset/agent" "github.com/openshift/installer/pkg/rhcos/cache" + "github.com/openshift/installer/pkg/types" ) const ( @@ -49,20 +50,15 @@ type ReleasePayload interface { ExtractFile(image string, filename 
string, architecture string) ([]string, error) } -type MirrorConfig interface { - HasMirrors() bool - GetICSPContents() ([]byte, error) -} - type releasePayload struct { config ExtractConfig releaseImage string pullSecret string - mirrorConfig MirrorConfig + mirrorConfig types.MirrorConfig } // NewReleasePayload is used to set up the executor to run oc commands. -func NewReleasePayload(config ExtractConfig, releaseImage string, pullSecret string, mirrorConfig MirrorConfig) ReleasePayload { +func NewReleasePayload(config ExtractConfig, releaseImage string, pullSecret string, mirrorConfig types.MirrorConfig) ReleasePayload { if config.MaxTries == 0 { config.MaxTries = ocDefaultTries } @@ -368,7 +364,7 @@ func removeCacheFile(path string) error { } // Create a temporary file containing the ImageContentPolicySources. -func getMirrorArg(mirrorConfig MirrorConfig) (string, func(), error) { +func getMirrorArg(mirrorConfig types.MirrorConfig) (string, func(), error) { if !mirrorConfig.HasMirrors() { logrus.Debugf("No registry entries to build ICSP file") return "", nil, nil diff --git a/pkg/types/mirror.go b/pkg/types/mirror.go new file mode 100644 index 00000000000..80e8f7698d4 --- /dev/null +++ b/pkg/types/mirror.go @@ -0,0 +1,55 @@ +package types + +import ( + "encoding/json" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" + + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" +) + +// Mirror holds the mirror list for a registry. +type Mirror struct { + Location string `json:"location"` + Mirrors []string `json:"mirrors,omitempty"` +} + +// MirrorConfig holds the registry mirror data. +type MirrorConfig []Mirror + +// HasMirrors returns whether there are any mirrors configured. +func (mc MirrorConfig) HasMirrors() bool { + return len(mc) > 0 +} + +// GetICSPContents converts the data in registries.conf into ICSP format. 
+func (mc MirrorConfig) GetICSPContents() ([]byte, error) { + icsp := operatorv1alpha1.ImageContentSourcePolicy{ + TypeMeta: metav1.TypeMeta{ + APIVersion: operatorv1alpha1.SchemeGroupVersion.String(), + Kind: "ImageContentSourcePolicy", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "image-policy", + // not namespaced + }, + } + + icsp.Spec.RepositoryDigestMirrors = make([]operatorv1alpha1.RepositoryDigestMirrors, len(mc)) + for i, mirrorRegistries := range mc { + icsp.Spec.RepositoryDigestMirrors[i] = operatorv1alpha1.RepositoryDigestMirrors{Source: mirrorRegistries.Location, Mirrors: mirrorRegistries.Mirrors} + } + + // Convert to json first so json tags are handled + jsonData, err := json.Marshal(&icsp) + if err != nil { + return nil, err + } + contents, err := yaml.JSONToYAML(jsonData) + if err != nil { + return nil, err + } + + return contents, nil +} diff --git a/pkg/types/zz_generated.deepcopy.go b/pkg/types/zz_generated.deepcopy.go index 4486127295d..b32e9eded99 100644 --- a/pkg/types/zz_generated.deepcopy.go +++ b/pkg/types/zz_generated.deepcopy.go @@ -583,6 +583,49 @@ func (in *MachinePoolPlatform) DeepCopy() *MachinePoolPlatform { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mirror) DeepCopyInto(out *Mirror) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mirror. +func (in *Mirror) DeepCopy() *Mirror { + if in == nil { + return nil + } + out := new(Mirror) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in MirrorConfig) DeepCopyInto(out *MirrorConfig) { + { + in := &in + *out = make(MirrorConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MirrorConfig. +func (in MirrorConfig) DeepCopy() MirrorConfig { + if in == nil { + return nil + } + out := new(MirrorConfig) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Networking) DeepCopyInto(out *Networking) { *out = *in From 8f058347e67ee9ab8a90c314dc4d5636861ddc81 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Mon, 31 Mar 2025 13:57:06 +1300 Subject: [PATCH 03/14] Refactor passing of baremetal TFVars Use the same struct to marshal and unmarshal. Presumably this wasn't previously possible when going through actual Terraform. --- pkg/infrastructure/baremetal/variables.go | 31 ++--------------------- pkg/tfvars/baremetal/baremetal.go | 29 ++++++++++++--------- 2 files changed, 19 insertions(+), 41 deletions(-) diff --git a/pkg/infrastructure/baremetal/variables.go b/pkg/infrastructure/baremetal/variables.go index 0ad3639b8ba..220a8f91904 100644 --- a/pkg/infrastructure/baremetal/variables.go +++ b/pkg/infrastructure/baremetal/variables.go @@ -20,17 +20,10 @@ const ( MastersFileName = ".masters.json" ) -type bridge struct { - Name string - MAC string -} - type baremetalConfig struct { ClusterID string - BootstrapOSImage string IgnitionBootstrap string - LibvirtURI string - Bridges []bridge + baremetaltfvars.Config } func getConfig(dir string) (baremetalConfig, error) { @@ -57,27 +50,7 @@ func getConfig(dir string) (baremetalConfig, error) { return config, fmt.Errorf("failed to load cluster terraform variables: %w", err) } - config.BootstrapOSImage = clusterBaremetalConfig.BootstrapOSImage - config.LibvirtURI = clusterBaremetalConfig.LibvirtURI - - for _, bridgeMap 
:= range clusterBaremetalConfig.Bridges { - mac, ok := bridgeMap["mac"] - if !ok { - return config, fmt.Errorf("bridge is missng a MAC address") - } - - name, ok := bridgeMap["name"] - if !ok { - return config, fmt.Errorf("bridge is missng a name") - } - - b := bridge{ - Name: name, - MAC: mac, - } - - config.Bridges = append(config.Bridges, b) - } + config.Config = *clusterBaremetalConfig return config, nil } diff --git a/pkg/tfvars/baremetal/baremetal.go b/pkg/tfvars/baremetal/baremetal.go index 85c8d9ac425..c34c3e04369 100644 --- a/pkg/tfvars/baremetal/baremetal.go +++ b/pkg/tfvars/baremetal/baremetal.go @@ -9,11 +9,17 @@ import ( "github.com/openshift/installer/pkg/rhcos/cache" ) +// Bridge represents a network bridge on the provisioner host. +type Bridge struct { + Name string `json:"name"` + MAC string `json:"mac"` +} + // Config represents the baremetal platform parts of install config needed for bootstrapping. type Config struct { - LibvirtURI string `json:"libvirt_uri,omitempty"` - BootstrapOSImage string `json:"bootstrap_os_image,omitempty"` - Bridges []map[string]string `json:"bridges"` + LibvirtURI string `json:"libvirt_uri,omitempty"` + BootstrapOSImage string `json:"bootstrap_os_image,omitempty"` + Bridges []Bridge `json:"bridges"` } type imageDownloadFunc func(baseURL, applicationName string) (string, error) @@ -33,20 +39,19 @@ func TFVars(libvirtURI string, bootstrapOSImage, externalBridge, externalMAC, pr return nil, errors.Wrap(err, "failed to use cached bootstrap libvirt image") } - var bridges []map[string]string + var bridges []Bridge bridges = append(bridges, - map[string]string{ - "name": externalBridge, - "mac": externalMAC, + Bridge{ + Name: externalBridge, + MAC: externalMAC, }) if provisioningBridge != "" { - bridges = append( - bridges, - map[string]string{ - "name": provisioningBridge, - "mac": provisioningMAC, + bridges = append(bridges, + Bridge{ + Name: provisioningBridge, + MAC: provisioningMAC, }) } From 
87d4f835efba3115f40477ed25de41913aae3de8 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Wed, 2 Apr 2025 16:37:47 +1300 Subject: [PATCH 04/14] Temporarily increase VM RAM allocation The amount of storage assigned to the Live /dev/loop0 partition in a Live image boot is proportional to the total amount of RAM. With a 6GiB VM, /dev/loop0 gets a little less than 3GiB. After running the node-image-pull.service there is typically about 2.8GiB used on /dev/loop0 (of which <200MiB was used before running the service). In a 6GiB VM this causes the "ostree container image pull" process to fail due to insufficient free space on the disk. 20GiB of RAM allows us to pull both the ostree container image and the necessary container images to run the metal bootstrap services. --- pkg/infrastructure/baremetal/bootstrap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/infrastructure/baremetal/bootstrap.go b/pkg/infrastructure/baremetal/bootstrap.go index 72f3ec89561..8d1fa3aecf6 100644 --- a/pkg/infrastructure/baremetal/bootstrap.go +++ b/pkg/infrastructure/baremetal/bootstrap.go @@ -69,8 +69,8 @@ func newDomain(name string) libvirtxml.Domain { Mode: "host-passthrough", }, Memory: &libvirtxml.DomainMemory{ - Value: 6144, - Unit: "MiB", + Value: 20, + Unit: "GiB", }, VCPU: &libvirtxml.DomainVCPU{ Value: 4, From 9e8f247ea9dc43b6e8e7901b8e5c5b61a430fe7f Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Mon, 31 Mar 2025 13:59:05 +1300 Subject: [PATCH 05/14] Use Live ISO in baremetal bootstrap VM Instead of downloading an RHCOS qemu image for the baremetal bootstrap - which disconnected users have to mirror locally - reuse the agent installer code for fetching a live ISO. If the `oc` binary is present, the live ISO will be retrieved from the release image (respecting the mirror config), which means disconnected users have nothing else to do. 
In the case that `oc` is not present, we fall back to downloading the image directly (this requires a connection to the Internet). In any case, the old qemu image is no longer used, and if a mirror URL is configured in the install-config it will be ignored. --- pkg/asset/cluster/tfvars/tfvars.go | 9 +- pkg/asset/rhcos/bootstrap_image.go | 31 +-- pkg/infrastructure/baremetal/bootstrap.go | 215 ++++++------------- pkg/infrastructure/baremetal/ignition.go | 86 -------- pkg/infrastructure/baremetal/image.go | 247 ---------------------- pkg/tfvars/baremetal/baremetal.go | 42 ++-- pkg/types/mirror.go | 23 ++ 7 files changed, 108 insertions(+), 545 deletions(-) delete mode 100644 pkg/infrastructure/baremetal/ignition.go delete mode 100644 pkg/infrastructure/baremetal/image.go diff --git a/pkg/asset/cluster/tfvars/tfvars.go b/pkg/asset/cluster/tfvars/tfvars.go index 2b55f751e3d..4b54791fabe 100644 --- a/pkg/asset/cluster/tfvars/tfvars.go +++ b/pkg/asset/cluster/tfvars/tfvars.go @@ -35,6 +35,7 @@ import ( "github.com/openshift/installer/pkg/asset/machines" "github.com/openshift/installer/pkg/asset/manifests" "github.com/openshift/installer/pkg/asset/openshiftinstall" + "github.com/openshift/installer/pkg/asset/releaseimage" "github.com/openshift/installer/pkg/asset/rhcos" "github.com/openshift/installer/pkg/tfvars" awstfvars "github.com/openshift/installer/pkg/tfvars/aws" @@ -101,6 +102,7 @@ func (t *TerraformVariables) Dependencies() []asset.Asset { new(rhcos.Image), new(rhcos.Release), new(rhcos.BootstrapImage), + &releaseimage.Image{}, &bootstrap.Bootstrap{}, &machine.Master{}, &machine.Arbiter{}, @@ -129,8 +131,9 @@ func (t *TerraformVariables) Generate(ctx context.Context, parents asset.Parents rhcosImage := new(rhcos.Image) rhcosRelease := new(rhcos.Release) rhcosBootstrapImage := new(rhcos.BootstrapImage) + releaseImage := &releaseimage.Image{} ironicCreds := &baremetalbootstrap.IronicCreds{} - parents.Get(clusterID, installConfig, bootstrapIgnAsset, arbiterIgnAsset, 
arbiterAsset, masterIgnAsset, mastersAsset, workersAsset, manifestsAsset, rhcosImage, rhcosRelease, rhcosBootstrapImage, ironicCreds) + parents.Get(clusterID, installConfig, bootstrapIgnAsset, arbiterIgnAsset, arbiterAsset, masterIgnAsset, mastersAsset, workersAsset, manifestsAsset, rhcosImage, rhcosRelease, rhcosBootstrapImage, releaseImage, ironicCreds) platform := installConfig.Config.Platform.Name() switch platform { @@ -764,7 +767,9 @@ func (t *TerraformVariables) Generate(ctx context.Context, parents asset.Parents case baremetal.Name: data, err = baremetaltfvars.TFVars( installConfig.Config.Platform.BareMetal.LibvirtURI, - string(*rhcosBootstrapImage), + releaseImage.PullSpec, + installConfig.Config.PullSecret, + types.BuildMirrorConfig(installConfig.Config), installConfig.Config.Platform.BareMetal.ExternalBridge, installConfig.Config.Platform.BareMetal.ExternalMACAddress, installConfig.Config.Platform.BareMetal.ProvisioningBridge, diff --git a/pkg/asset/rhcos/bootstrap_image.go b/pkg/asset/rhcos/bootstrap_image.go index 10d1428cdd6..58373799b24 100644 --- a/pkg/asset/rhcos/bootstrap_image.go +++ b/pkg/asset/rhcos/bootstrap_image.go @@ -3,14 +3,9 @@ package rhcos import ( "context" - "fmt" - "time" - - "github.com/coreos/stream-metadata-go/arch" "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/rhcos" "github.com/openshift/installer/pkg/types/baremetal" ) @@ -37,9 +32,6 @@ func (i *BootstrapImage) Dependencies() []asset.Asset { // Generate the RHCOS Bootstrap image location. 
func (i *BootstrapImage) Generate(ctx context.Context, p asset.Parents) error { - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - ic := &installconfig.InstallConfig{} rhcosImage := new(Image) p.Get(ic, rhcosImage) @@ -47,32 +39,11 @@ func (i *BootstrapImage) Generate(ctx context.Context, p asset.Parents) error { switch config.Platform.Name() { case baremetal.Name: - archName := arch.RpmArch(string(config.ControlPlane.Architecture)) - st, err := rhcos.FetchCoreOSBuild(ctx) - if err != nil { - return err - } - streamArch, err := st.GetArchitecture(archName) - if err != nil { - return err - } - // Check for CoreOS image URL override if boi := config.Platform.BareMetal.BootstrapOSImage; boi != "" { *i = BootstrapImage(boi) - return nil - } - // Baremetal IPI launches a local VM for the bootstrap node - // Hence requires the QEMU image to use the libvirt backend - if a, ok := streamArch.Artifacts["qemu"]; ok { - u, err := rhcos.FindArtifactURL(a) - if err != nil { - return err - } - *i = BootstrapImage(u) - return nil } - return fmt.Errorf("%s: No qemu build found", st.FormatPrefix(archName)) + return nil default: // other platforms use the same image for all nodes *i = BootstrapImage(rhcosImage.ControlPlane) diff --git a/pkg/infrastructure/baremetal/bootstrap.go b/pkg/infrastructure/baremetal/bootstrap.go index 8d1fa3aecf6..7942f786329 100644 --- a/pkg/infrastructure/baremetal/bootstrap.go +++ b/pkg/infrastructure/baremetal/bootstrap.go @@ -1,6 +1,7 @@ package baremetal import ( + "context" "encoding/xml" "errors" "fmt" @@ -12,23 +13,10 @@ import ( "github.com/digitalocean/go-libvirt" "github.com/sirupsen/logrus" "libvirt.org/go/libvirtxml" -) - -func newCopier(virConn *libvirt.Libvirt, volume libvirt.StorageVol, size uint64) func(src io.Reader) error { - copier := func(src io.Reader) error { - return virConn.StorageVolUpload(volume, src, 0, size, 0) - } - return copier -} -func newVolumeFromXML(s string) (libvirtxml.StorageVolume, error) { - 
var volumeDef libvirtxml.StorageVolume - err := xml.Unmarshal([]byte(s), &volumeDef) - if err != nil { - return libvirtxml.StorageVolume{}, err - } - return volumeDef, nil -} + "github.com/openshift/assisted-image-service/pkg/isoeditor" + "github.com/openshift/installer/pkg/asset/rhcos" +) func newDomain(name string) libvirtxml.Domain { domainDef := libvirtxml.Domain{ @@ -124,24 +112,6 @@ func newDomain(name string) libvirtxml.Domain { return domainDef } -func newVolume(name string) libvirtxml.StorageVolume { - return libvirtxml.StorageVolume{ - Name: name, - Target: &libvirtxml.StorageVolumeTarget{ - Format: &libvirtxml.StorageVolumeTargetFormat{ - Type: "qcow2", - }, - Permissions: &libvirtxml.StorageVolumeTargetPermissions{ - Mode: "644", - }, - }, - Capacity: &libvirtxml.StorageVolumeSize{ - Unit: "bytes", - Value: 1, - }, - } -} - func createStoragePool(virConn *libvirt.Libvirt, config baremetalConfig) (libvirt.StoragePool, error) { // TODO: check if unique bootstrapPool := libvirtxml.StoragePool{ @@ -184,94 +154,80 @@ func createStoragePool(virConn *libvirt.Libvirt, config baremetalConfig) (libvir return pool, nil } -func createBaseVolume(virConn *libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool) (libvirt.StorageVol, error) { - bootstrapBaseVolume := newVolume(fmt.Sprintf("%s-bootstrap-base", config.ClusterID)) - image, err := newImage(config.BootstrapOSImage) - if err != nil { - return libvirt.StorageVol{}, err - } +func getLiveISO(config baremetalConfig, arch string) (string, error) { + fetcher := rhcos.NewBaseISOFetcher( + rhcos.NewReleasePayload( + rhcos.ExtractConfig{}, + config.ReleaseImagePullSpec, + config.PullSecret, + config.MirrorConfig, + ), + nil) + return fetcher.GetBaseISOFilename(context.Background(), arch) +} - isQCOW2, err := image.IsQCOW2() +func createLiveVolume(virConn *libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool) (libvirt.StorageVol, error) { + capabilities, err := getHostCapabilities(virConn) if 
err != nil { - return libvirt.StorageVol{}, err - } - if isQCOW2 { - bootstrapBaseVolume.Target.Format.Type = "qcow2" + return libvirt.StorageVol{}, fmt.Errorf("failed to get libvirt capabilities: %w", err) } - size, err := image.Size() + isoFile, err := getLiveISO(config, capabilities.Host.CPU.Arch) if err != nil { return libvirt.StorageVol{}, err } + defer os.Remove(isoFile) - bootstrapBaseVolume.Capacity.Unit = "B" - bootstrapBaseVolume.Capacity.Value = size - - bootstrapBaseVolumeXML, err := xml.Marshal(bootstrapBaseVolume) + stream, err := isoeditor.NewRHCOSStreamReader( + isoFile, + &isoeditor.IgnitionContent{Config: []byte(config.IgnitionBootstrap)}, + nil, + nil, // TODO(zaneb): FIPS + ) if err != nil { return libvirt.StorageVol{}, err } - - baseVolume, err := virConn.StorageVolCreateXML(pool, string(bootstrapBaseVolumeXML), 0) - + defer stream.Close() + size, err := stream.Seek(0, io.SeekEnd) if err != nil { return libvirt.StorageVol{}, err } - - err = image.Import(newCopier(virConn, baseVolume, bootstrapBaseVolume.Capacity.Value), bootstrapBaseVolume) + _, err = stream.Seek(0, io.SeekStart) if err != nil { return libvirt.StorageVol{}, err } - return baseVolume, nil -} - -func createMainVolume(virConn *libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool, baseVolume libvirt.StorageVol) (libvirt.StorageVol, error) { - bootstrapVolume := newVolume(fmt.Sprintf("%s-bootstrap", config.ClusterID)) - bootstrapVolume.Capacity.Value = 34359738368 - - volPath, err := virConn.StorageVolGetPath(baseVolume) - if err != nil { - return libvirt.StorageVol{}, err + bootstrapLiveVolume := libvirtxml.StorageVolume{ + Name: fmt.Sprintf("%s-live-provisioner", config.ClusterID), + Type: "file", + Target: &libvirtxml.StorageVolumeTarget{ + Format: &libvirtxml.StorageVolumeTargetFormat{ + Type: "iso", + }, + Permissions: &libvirtxml.StorageVolumeTargetPermissions{ + Mode: "644", + }, + }, + Capacity: &libvirtxml.StorageVolumeSize{ + Value: uint64(size), + }, } - - 
baseVolumeXMLDesc, err := virConn.StorageVolGetXMLDesc(baseVolume, 0) + bootstrapLiveVolumeXML, err := xml.Marshal(bootstrapLiveVolume) if err != nil { return libvirt.StorageVol{}, err } - baseVolFromLibvirt, err := newVolumeFromXML(baseVolumeXMLDesc) + liveVolume, err := virConn.StorageVolCreateXML(pool, string(bootstrapLiveVolumeXML), 0) if err != nil { return libvirt.StorageVol{}, err } - backingStoreVolumeDef := libvirtxml.StorageVolumeBackingStore{ - Path: volPath, - Format: baseVolFromLibvirt.Target.Format, - } - - bootstrapVolume.BackingStore = &backingStoreVolumeDef - - bootstrapVolumeXML, err := xml.Marshal(bootstrapVolume) + err = virConn.StorageVolUpload(liveVolume, stream, 0, uint64(size), 0) if err != nil { return libvirt.StorageVol{}, err } - return virConn.StorageVolCreateXML(pool, string(bootstrapVolumeXML), 0) -} -func createIgnition(virConn *libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool) error { - bootstrapIgnition := defIgnition{ - Name: fmt.Sprintf("%s-bootstrap.ign", config.ClusterID), - PoolName: pool.Name, - Content: config.IgnitionBootstrap, - } - - _, err := bootstrapIgnition.CreateAndUpload(virConn) - if err != nil { - return err - } - - return nil + return liveVolume, nil } func getHostCapabilities(virConn *libvirt.Libvirt) (libvirtxml.Caps, error) { @@ -290,7 +246,7 @@ func getHostCapabilities(virConn *libvirt.Libvirt) (libvirtxml.Caps, error) { return caps, nil } -func createBootstrapDomain(virConn *libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool, volume libvirt.StorageVol) error { +func createBootstrapDomain(virConn *libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool, liveCDVolume libvirt.StorageVol) error { bootstrapDom := newDomain(fmt.Sprintf("%s-bootstrap", config.ClusterID)) capabilities, err := getHostCapabilities(virConn) @@ -330,42 +286,25 @@ func createBootstrapDomain(virConn *libvirt.Libvirt, config baremetalConfig, poo bootstrapDom.Devices.Interfaces = 
append(bootstrapDom.Devices.Interfaces, netIface) } - disk := libvirtxml.DomainDisk{ - Device: "disk", + liveCD := libvirtxml.DomainDisk{ + Device: "cdrom", Target: &libvirtxml.DomainDiskTarget{ - Bus: "virtio", - Dev: "vda", + Bus: "sata", + Dev: "sda", }, Driver: &libvirtxml.DomainDiskDriver{ Name: "qemu", Type: "raw", }, Source: &libvirtxml.DomainDiskSource{ - Index: 0, Volume: &libvirtxml.DomainDiskSourceVolume{ Pool: pool.Name, - Volume: volume.Name, + Volume: liveCDVolume.Name, }, }, } - disk.Driver = &libvirtxml.DomainDiskDriver{ - Name: "qemu", - Type: "qcow2", - } - bootstrapDom.Devices.Disks = append(bootstrapDom.Devices.Disks, disk) - - ignitionKey := fmt.Sprintf("/var/lib/libvirt/openshift-images/%s-bootstrap/%s-bootstrap.ign", config.ClusterID, config.ClusterID) - bootstrapDom.QEMUCommandline = &libvirtxml.DomainQEMUCommandline{ - Args: []libvirtxml.DomainQEMUCommandlineArg{ - { - Value: "-fw_cfg", - }, - { - Value: fmt.Sprintf("name=%s,file=%s", "opt/com.coreos/config", ignitionKey), - }, - }, - } + bootstrapDom.Devices.Disks = append(bootstrapDom.Devices.Disks, liveCD) bootstrapDom.Resource = nil @@ -405,26 +344,14 @@ func createBootstrap(config baremetalConfig) error { return err } - logrus.Debug(" Creating base volume") - baseVolume, err := createBaseVolume(virConn, config, pool) - if err != nil { - return err - } - - logrus.Debug(" Creating main volume") - mainVolume, err := createMainVolume(virConn, config, pool, baseVolume) - if err != nil { - return err - } - - logrus.Debug(" Creating ignition") - err = createIgnition(virConn, config, pool) + logrus.Debug(" Creating live volume") + liveVolume, err := createLiveVolume(virConn, config, pool) if err != nil { return err } logrus.Debug(" Creating bootstrap domain") - err = createBootstrapDomain(virConn, config, pool, mainVolume) + err = createBootstrapDomain(virConn, config, pool, liveVolume) if err != nil { return err } @@ -482,34 +409,12 @@ func destroyBootstrap(config baremetalConfig) error { 
return err } - vol, err := virConn.StorageVolLookupByName(pool, name) - if err != nil { - return err - } - - logrus.Debug(" Deleting main volume") - err = virConn.StorageVolDelete(vol, libvirt.StorageVolDeleteNormal) - if err != nil { - return err - } - - vol, err = virConn.StorageVolLookupByName(pool, fmt.Sprintf("%s-bootstrap-base", config.ClusterID)) - if err != nil { - return err - } - - logrus.Debug(" Deleting base volume") - err = virConn.StorageVolDelete(vol, libvirt.StorageVolDeleteNormal) - if err != nil { - return err - } - - vol, err = virConn.StorageVolLookupByName(pool, fmt.Sprintf("%s-bootstrap.ign", config.ClusterID)) + vol, err := virConn.StorageVolLookupByName(pool, fmt.Sprintf("%s-live-provisioner", config.ClusterID)) if err != nil { return err } - logrus.Debug(" Deleting ignition volume") + logrus.Debug(" Deleting live volume") err = virConn.StorageVolDelete(vol, libvirt.StorageVolDeleteNormal) if err != nil { return err @@ -521,7 +426,7 @@ func destroyBootstrap(config baremetalConfig) error { return err } - logrus.Debug(" Deleting pool pool") + logrus.Debug(" Deleting pool") err = virConn.StoragePoolDelete(pool, libvirt.StoragePoolDeleteNormal) if err != nil { return err diff --git a/pkg/infrastructure/baremetal/ignition.go b/pkg/infrastructure/baremetal/ignition.go deleted file mode 100644 index 6196867045e..00000000000 --- a/pkg/infrastructure/baremetal/ignition.go +++ /dev/null @@ -1,86 +0,0 @@ -package baremetal - -import ( - "encoding/xml" - "fmt" - "os" - - "github.com/digitalocean/go-libvirt" - "github.com/sirupsen/logrus" -) - -type defIgnition struct { - Name string - PoolName string - Content string -} - -func (ign *defIgnition) createFile() (string, error) { - tempFile, err := os.CreateTemp("", ign.Name) - if err != nil { - return "", fmt.Errorf("error creating tmp file: %w", err) - } - defer tempFile.Close() - - if _, err := tempFile.WriteString(ign.Content); err != nil { - return "", fmt.Errorf("cannot write Ignition object to 
temporary " + - "ignition file") - } - - return tempFile.Name(), nil -} - -func (ign *defIgnition) CreateAndUpload(client *libvirt.Libvirt) (string, error) { - pool, err := client.StoragePoolLookupByName(ign.PoolName) - if err != nil { - return "", fmt.Errorf("can't find storage pool '%s'", ign.PoolName) - } - - err = client.StoragePoolRefresh(pool, 0) - if err != nil { - return "", fmt.Errorf("failed to refresh pool %w", err) - } - - volumeDef := newVolume(ign.Name) - - ignFile, err := ign.createFile() - if err != nil { - return "", err - } - defer func() { - if err = os.Remove(ignFile); err != nil { - logrus.Errorf("error while removing tmp Ignition file: %s", err) - } - }() - - img, err := newImage(ignFile) - if err != nil { - return "", err - } - - size, err := img.Size() - if err != nil { - return "", err - } - - volumeDef.Capacity.Unit = "B" - volumeDef.Capacity.Value = size - volumeDef.Target.Format.Type = "raw" - - volumeDefXML, err := xml.Marshal(volumeDef) - if err != nil { - return "", fmt.Errorf("error serializing libvirt volume: %w", err) - } - - volume, err := client.StorageVolCreateXML(pool, string(volumeDefXML), 0) - if err != nil { - return "", fmt.Errorf("error creating libvirt volume for Ignition %s: %w", ign.Name, err) - } - - err = img.Import(newCopier(client, volume, volumeDef.Capacity.Value), volumeDef) - if err != nil { - return "", fmt.Errorf("error while uploading ignition file %s: %w", img.String(), err) - } - - return "", nil -} diff --git a/pkg/infrastructure/baremetal/image.go b/pkg/infrastructure/baremetal/image.go deleted file mode 100644 index 09e9a7aa1e3..00000000000 --- a/pkg/infrastructure/baremetal/image.go +++ /dev/null @@ -1,247 +0,0 @@ -// This file is largely based on existing code from terraform-provider-libvirt 0.6.12. -// https://github.com/dmacvicar/terraform-provider-libvirt -// Original code distributed under the terms of Apache License 2.0. 
-package baremetal - -import ( - "fmt" - "io" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/sirupsen/logrus" - "libvirt.org/go/libvirtxml" -) - -type image interface { - Size() (uint64, error) - Import(func(io.Reader) error, libvirtxml.StorageVolume) error - String() string - IsQCOW2() (bool, error) -} - -type localImage struct { - path string -} - -func (i *localImage) String() string { - return i.path -} - -func isQCOW2Header(buf []byte) (bool, error) { - if len(buf) < 8 { - return false, fmt.Errorf("expected header of 8 bytes. Got %d", len(buf)) - } - if buf[0] == 'Q' && buf[1] == 'F' && buf[2] == 'I' && buf[3] == 0xfb && buf[4] == 0x00 && buf[5] == 0x00 && buf[6] == 0x00 && buf[7] == 0x03 { - return true, nil - } - return false, nil -} - -func (i *localImage) Size() (uint64, error) { - fi, err := os.Stat(i.path) - if err != nil { - return 0, err - } - return uint64(fi.Size()), nil -} - -func (i *localImage) IsQCOW2() (bool, error) { - file, err := os.Open(i.path) - if err != nil { - return false, fmt.Errorf("error while opening %s: %w", i.path, err) - } - defer file.Close() - buf := make([]byte, 8) - _, err = io.ReadAtLeast(file, buf, 8) - if err != nil { - return false, err - } - return isQCOW2Header(buf) -} - -func (i *localImage) Import(copier func(io.Reader) error, vol libvirtxml.StorageVolume) error { - file, err := os.Open(i.path) - if err != nil { - return fmt.Errorf("error while opening %s: %w", i.path, err) - } - defer file.Close() - - fi, err := file.Stat() - if err != nil { - return err - } - // we can skip the upload if the modification times are the same - if vol.Target.Timestamps != nil && vol.Target.Timestamps.Mtime != "" { - if fi.ModTime().Equal(timeFromEpoch(vol.Target.Timestamps.Mtime)) { - logrus.Info("Modification time is the same: skipping image copy") - return nil - } - } - - return copier(file) -} - -type httpImage struct { - url *url.URL -} - -func (i *httpImage) String() string { - return 
i.url.String() -} - -func (i *httpImage) Size() (uint64, error) { - response, err := http.Head(i.url.String()) - if err != nil { - return 0, err - } - if response.StatusCode == 403 { - // possibly only the HEAD method is forbidden, try a Body-less GET instead - response, err = http.Get(i.url.String()) - if err != nil { - return 0, err - } - - response.Body.Close() - } - if response.StatusCode != 200 { - return 0, - fmt.Errorf( - "error accessing remote resource: %s - %s", - i.url.String(), - response.Status) - } - - length, err := strconv.Atoi(response.Header.Get("Content-Length")) - if err != nil { - err = fmt.Errorf( - "error while getting Content-Length of \"%s\": %w - got %s", - i.url.String(), - err, - response.Header.Get("Content-Length")) - return 0, err - } - return uint64(length), nil -} - -func (i *httpImage) IsQCOW2() (bool, error) { - client := &http.Client{} - req, err := http.NewRequest("GET", i.url.String(), nil) - if err != nil { - return false, err - } - req.Header.Set("Range", "bytes=0-7") - response, err := client.Do(req) - - if err != nil { - return false, err - } - defer response.Body.Close() - - if response.StatusCode != 206 { - return false, fmt.Errorf( - "can't retrieve partial header of resource to determine file type: %s - %s", - i.url.String(), - response.Status) - } - - header, err := io.ReadAll(response.Body) - if err != nil { - return false, err - } - - if len(header) < 8 { - return false, fmt.Errorf( - "can't retrieve read header of resource to determine file type: %s - %d bytes read", - i.url.String(), - len(header)) - } - - return isQCOW2Header(header) -} - -func (i *httpImage) Import(copier func(io.Reader) error, vol libvirtxml.StorageVolume) error { - // number of download retries on non client errors (eg. 
5xx) - const maxHTTPRetries int = 3 - // wait time between retries - const retryWait time.Duration = 2 * time.Second - - client := &http.Client{} - req, err := http.NewRequest("GET", i.url.String(), nil) - - if err != nil { - return fmt.Errorf("error while downloading %s: %w", i.url.String(), err) - } - - if vol.Target.Timestamps != nil && vol.Target.Timestamps.Mtime != "" { - req.Header.Set("If-Modified-Since", timeFromEpoch(vol.Target.Timestamps.Mtime).UTC().Format(http.TimeFormat)) - } - - var response *http.Response - for retryCount := 0; retryCount < maxHTTPRetries; retryCount++ { - response, err = client.Do(req) - if err != nil { - return fmt.Errorf("error while downloading %s: %w", i.url.String(), err) - } - defer response.Body.Close() - - logrus.Debugf("url resp status code %s (retry #%d)\n", response.Status, retryCount) - - switch response.StatusCode { - case http.StatusNotModified: - return nil - case http.StatusOK: - return copier(response.Body) - default: - if response.StatusCode < 500 { - break - } - // The problem is not client but server side - // retry a few times after a small wait - if retryCount < maxHTTPRetries { - time.Sleep(retryWait) - } - } - } - return fmt.Errorf("error while downloading %s: %v", i.url.String(), response) -} - -func newImage(source string) (image, error) { - url, err := url.Parse(source) - if err != nil { - return nil, fmt.Errorf("can't parse source '%s' as url: %w", source, err) - } - - if strings.HasPrefix(url.Scheme, "http") { - return &httpImage{url: url}, nil - } - - if url.Scheme == "file" || url.Scheme == "" { - return &localImage{path: url.Path}, nil - } - - return nil, fmt.Errorf("don't know how to read from '%s': %w", url.String(), err) -} - -func timeFromEpoch(str string) time.Time { - var s, ns int - var err error - - ts := strings.Split(str, ".") - if len(ts) == 2 { - ns, err = strconv.Atoi(ts[1]) - if err != nil { - ns = 0 - } - } - s, err = strconv.Atoi(ts[0]) - if err != nil { - s = 0 - } - - return 
time.Unix(int64(s), int64(ns)) -} diff --git a/pkg/tfvars/baremetal/baremetal.go b/pkg/tfvars/baremetal/baremetal.go index c34c3e04369..de44c6fc5a5 100644 --- a/pkg/tfvars/baremetal/baremetal.go +++ b/pkg/tfvars/baremetal/baremetal.go @@ -4,9 +4,7 @@ package baremetal import ( "encoding/json" - "github.com/pkg/errors" - - "github.com/openshift/installer/pkg/rhcos/cache" + "github.com/openshift/installer/pkg/types" ) // Bridge represents a network bridge on the provisioner host. @@ -17,28 +15,20 @@ type Bridge struct { // Config represents the baremetal platform parts of install config needed for bootstrapping. type Config struct { - LibvirtURI string `json:"libvirt_uri,omitempty"` - BootstrapOSImage string `json:"bootstrap_os_image,omitempty"` - Bridges []Bridge `json:"bridges"` -} - -type imageDownloadFunc func(baseURL, applicationName string) (string, error) - -var ( - imageDownloader imageDownloadFunc -) - -func init() { - imageDownloader = cache.DownloadImageFile + LibvirtURI string `json:"libvirt_uri,omitempty"` + ReleaseImagePullSpec string `json:"release_image,omitempty"` + PullSecret string `json:"pull_secret,omitempty"` + MirrorConfig types.MirrorConfig `json:"mirror_config,omitempty"` + Bridges []Bridge `json:"bridges"` } // TFVars generates bare metal specific Terraform variables. 
-func TFVars(libvirtURI string, bootstrapOSImage, externalBridge, externalMAC, provisioningBridge, provisioningMAC string) ([]byte, error) { - bootstrapOSImage, err := imageDownloader(bootstrapOSImage, cache.InstallerApplicationName) - if err != nil { - return nil, errors.Wrap(err, "failed to use cached bootstrap libvirt image") - } - +func TFVars( + libvirtURI string, + releaseImagePullSpec string, + pullSecret string, + mirrorConfig types.MirrorConfig, + externalBridge, externalMAC, provisioningBridge, provisioningMAC string) ([]byte, error) { var bridges []Bridge bridges = append(bridges, @@ -56,9 +46,11 @@ func TFVars(libvirtURI string, bootstrapOSImage, externalBridge, externalMAC, pr } cfg := &Config{ - LibvirtURI: libvirtURI, - BootstrapOSImage: bootstrapOSImage, - Bridges: bridges, + LibvirtURI: libvirtURI, + ReleaseImagePullSpec: releaseImagePullSpec, + PullSecret: pullSecret, + MirrorConfig: mirrorConfig, + Bridges: bridges, } return json.MarshalIndent(cfg, "", " ") diff --git a/pkg/types/mirror.go b/pkg/types/mirror.go index 80e8f7698d4..b380e9cd376 100644 --- a/pkg/types/mirror.go +++ b/pkg/types/mirror.go @@ -18,6 +18,29 @@ type Mirror struct { // MirrorConfig holds the registry mirror data. type MirrorConfig []Mirror +// BuildMirrorConfig populates a MirrorConfig from a given InstallConfig. +func BuildMirrorConfig(ic *InstallConfig) MirrorConfig { + var mc MirrorConfig + + if len(ic.ImageDigestSources) > 0 { + for _, src := range ic.ImageDigestSources { + mc = append(mc, Mirror{ + Location: src.Source, + Mirrors: src.Mirrors, + }) + } + } else if len(ic.DeprecatedImageContentSources) > 0 { + for _, src := range ic.DeprecatedImageContentSources { + mc = append(mc, Mirror{ + Location: src.Source, + Mirrors: src.Mirrors, + }) + } + } + + return mc +} + // HasMirrors returns whether there are any mirrors configured. 
func (mc MirrorConfig) HasMirrors() bool { return len(mc) > 0 From 5bedbf680765b7331350be4695798bceddb87624 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Tue, 1 Jul 2025 22:50:15 +1200 Subject: [PATCH 06/14] Enable FIPS on baremetal bootstrap VM If the cluster is FIPS mode, we should enable FIPS on the baremetal bootstrap VM also. --- pkg/asset/cluster/tfvars/tfvars.go | 1 + pkg/infrastructure/baremetal/bootstrap.go | 6 +++++- pkg/tfvars/baremetal/baremetal.go | 3 +++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/asset/cluster/tfvars/tfvars.go b/pkg/asset/cluster/tfvars/tfvars.go index 4b54791fabe..12e16a4a92a 100644 --- a/pkg/asset/cluster/tfvars/tfvars.go +++ b/pkg/asset/cluster/tfvars/tfvars.go @@ -767,6 +767,7 @@ func (t *TerraformVariables) Generate(ctx context.Context, parents asset.Parents case baremetal.Name: data, err = baremetaltfvars.TFVars( installConfig.Config.Platform.BareMetal.LibvirtURI, + installConfig.Config.FIPS, releaseImage.PullSpec, installConfig.Config.PullSecret, types.BuildMirrorConfig(installConfig.Config), diff --git a/pkg/infrastructure/baremetal/bootstrap.go b/pkg/infrastructure/baremetal/bootstrap.go index 7942f786329..d1f4d9f9cff 100644 --- a/pkg/infrastructure/baremetal/bootstrap.go +++ b/pkg/infrastructure/baremetal/bootstrap.go @@ -178,11 +178,15 @@ func createLiveVolume(virConn *libvirt.Libvirt, config baremetalConfig, pool lib } defer os.Remove(isoFile) + var kargs string + if config.FIPS { + kargs += " fips=1" + } stream, err := isoeditor.NewRHCOSStreamReader( isoFile, &isoeditor.IgnitionContent{Config: []byte(config.IgnitionBootstrap)}, nil, - nil, // TODO(zaneb): FIPS + []byte(kargs), ) if err != nil { return libvirt.StorageVol{}, err diff --git a/pkg/tfvars/baremetal/baremetal.go b/pkg/tfvars/baremetal/baremetal.go index de44c6fc5a5..4f80609b8c5 100644 --- a/pkg/tfvars/baremetal/baremetal.go +++ b/pkg/tfvars/baremetal/baremetal.go @@ -16,6 +16,7 @@ type Bridge struct { // Config represents the baremetal 
platform parts of install config needed for bootstrapping. type Config struct { LibvirtURI string `json:"libvirt_uri,omitempty"` + FIPS bool `json:"fips,omitempty"` ReleaseImagePullSpec string `json:"release_image,omitempty"` PullSecret string `json:"pull_secret,omitempty"` MirrorConfig types.MirrorConfig `json:"mirror_config,omitempty"` @@ -25,6 +26,7 @@ type Config struct { // TFVars generates bare metal specific Terraform variables. func TFVars( libvirtURI string, + fips bool, releaseImagePullSpec string, pullSecret string, mirrorConfig types.MirrorConfig, @@ -47,6 +49,7 @@ func TFVars( cfg := &Config{ LibvirtURI: libvirtURI, + FIPS: fips, ReleaseImagePullSpec: releaseImagePullSpec, PullSecret: pullSecret, MirrorConfig: mirrorConfig, From cc78208d4799cc6d193f578eb3128fdaef41d0ab Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Wed, 2 Apr 2025 23:27:24 +1300 Subject: [PATCH 07/14] Eliminate extract-machine-os from bootstrap When the bootstrap is booted from a live ISO, we have access to both the ISO image and the PXE boot components in it directly from the host filesystem; there is no need to extract it again from the release image. This saves a lot of RAM in the live environment. We must pass SecurityLabelDisable=true because the mounted live ISO is a read-only filesystem, and therefore we are unable to relabel the files with the necessary SELinux context. 
--- .../systemd/image-customization.container | 11 +++++--- .../systemd/ironic-httpd.container.template | 4 ++- .../systemd/ironic.container.template | 3 +++ .../metal3-baremetal-operator.container | 2 +- .../files/usr/local/bin/extract-machine-os.sh | 9 ------- .../systemd/units/extract-machine-os.service | 27 ------------------- pkg/infrastructure/baremetal/bootstrap.go | 2 +- 7 files changed, 15 insertions(+), 43 deletions(-) delete mode 100644 data/data/bootstrap/baremetal/files/usr/local/bin/extract-machine-os.sh delete mode 100644 data/data/bootstrap/baremetal/systemd/units/extract-machine-os.service diff --git a/data/data/bootstrap/baremetal/files/etc/containers/systemd/image-customization.container b/data/data/bootstrap/baremetal/files/etc/containers/systemd/image-customization.container index b9ad1fcc380..87aefece6fa 100644 --- a/data/data/bootstrap/baremetal/files/etc/containers/systemd/image-customization.container +++ b/data/data/bootstrap/baremetal/files/etc/containers/systemd/image-customization.container @@ -3,8 +3,8 @@ Description=Customized Machine OS Image Server BindsTo=ironic-volume.service Requires=build-ironic-env.service PartOf=ironic.service -Wants=network-online.target extract-machine-os.service -After=network-online.target ironic-volume.service build-ironic-env.service extract-machine-os.service +Wants=network-online.target +After=network-online.target ironic-volume.service build-ironic-env.service [Container] ContainerName=image-customization @@ -17,8 +17,11 @@ Volume=/etc/containers:/tmp/containers:z Volume=${AUTH_DIR}:/auth:z,ro Volume=/opt/openshift:/opt/openshift:z,ro Volume=/etc/pki/ca-trust/source/anchors/ca.crt:/tmp/ca.crt:z,ro -Environment="DEPLOY_ISO=/shared/html/images/ironic-python-agent.iso" -Environment="DEPLOY_INITRD=/shared/html/images/ironic-python-agent.initramfs" +AddDevice=/dev/sr0:/images/live.iso:r +Volume=/run/media/iso/images/pxeboot:/images/pxe:ro +SecurityLabelDisable=true 
+Environment="DEPLOY_ISO=/images/live.iso" +Environment="DEPLOY_INITRD=/images/pxe/initrd.img" Environment="IRONIC_BASE_URL=${IRONIC_BASE_URL}" Environment="IRONIC_RAMDISK_SSH_KEY=${IRONIC_RAMDISK_SSH_KEY}" Environment="IRONIC_AGENT_IMAGE=${IRONIC_AGENT_IMAGE}" diff --git a/data/data/bootstrap/baremetal/files/etc/containers/systemd/ironic-httpd.container.template b/data/data/bootstrap/baremetal/files/etc/containers/systemd/ironic-httpd.container.template index 9a6f3e1506c..5d8cf3c48dd 100644 --- a/data/data/bootstrap/baremetal/files/etc/containers/systemd/ironic-httpd.container.template +++ b/data/data/bootstrap/baremetal/files/etc/containers/systemd/ironic-httpd.container.template @@ -4,7 +4,7 @@ BindsTo=ironic-volume.service Requires=build-ironic-env.service PartOf=ironic.service Wants=network-online.target -After=network-online.target ironic-volume.service build-ironic-env.service extract-machine-os.service provisioning-interface.service +After=network-online.target ironic-volume.service build-ironic-env.service provisioning-interface.service [Container] ContainerName=httpd @@ -21,6 +21,8 @@ Volume=ironic.volume:/shared:z Volume=/opt/openshift/tls/ironic/:/certs/vmedia/:z {{ end }} Volume=/opt/openshift/tls/ironic/:/certs/ironic/:z +Volume=/run/media/iso/images/pxeboot/rootfs.img:/shared/html/images/ironic-python-agent.rootfs:ro +SecurityLabelDisable=true Environment="IRONIC_RAMDISK_SSH_KEY=${IRONIC_RAMDISK_SSH_KEY}" Environment="PROVISIONING_INTERFACE=${PROVISIONING_INTERFACE}" Environment="HTTP_PORT=${HTTP_PORT}" diff --git a/data/data/bootstrap/baremetal/files/etc/containers/systemd/ironic.container.template b/data/data/bootstrap/baremetal/files/etc/containers/systemd/ironic.container.template index 2546c50016f..f82e446ea32 100644 --- a/data/data/bootstrap/baremetal/files/etc/containers/systemd/ironic.container.template +++ b/data/data/bootstrap/baremetal/files/etc/containers/systemd/ironic.container.template @@ -25,6 +25,9 @@ 
Volume=/opt/openshift/tls/ironic/:/certs/ironic/:z {{ if .PlatformData.BareMetal.BMCVerifyCA }} Volume=/opt/openshift/bmc-ca:/certs/ca/bmc:z {{ end }} +# Note we are limited here by conductor.file_url_allowed_paths in the ironic config +Volume=/run/media/iso/images/pxeboot/vmlinuz:/var/lib/ironic/pxe/vmlinuz:ro +SecurityLabelDisable=true Environment="IRONIC_RAMDISK_SSH_KEY=${IRONIC_RAMDISK_SSH_KEY}" Environment="PROVISIONING_INTERFACE=${PROVISIONING_INTERFACE}" Environment="OS_CONDUCTOR__HEARTBEAT_TIMEOUT=120" diff --git a/data/data/bootstrap/baremetal/files/etc/containers/systemd/metal3-baremetal-operator.container b/data/data/bootstrap/baremetal/files/etc/containers/systemd/metal3-baremetal-operator.container index 40855f545a9..53bf8356f7f 100644 --- a/data/data/bootstrap/baremetal/files/etc/containers/systemd/metal3-baremetal-operator.container +++ b/data/data/bootstrap/baremetal/files/etc/containers/systemd/metal3-baremetal-operator.container @@ -14,7 +14,7 @@ Volume=/opt/openshift:/opt/openshift:z,ro Volume=/opt/openshift/tls/ironic/:/certs/ironic/:z Environment="XDG_RUNTIME_DIR=/run/user/${UID}" Environment="KUBECONFIG=/opt/openshift/auth/kubeconfig-loopback" -Environment="DEPLOY_KERNEL_URL=file:///shared/html/images/ironic-python-agent.kernel" +Environment="DEPLOY_KERNEL_URL=file:///var/lib/ironic/pxe/vmlinuz" Environment="IRONIC_ENDPOINT=${IRONIC_ENDPOINT}" Environment="IRONIC_EXTERNAL_URL_V6=${IRONIC_EXTERNAL_URL_V6}" Environment="LEASE_DURATION_SECONDS=137" diff --git a/data/data/bootstrap/baremetal/files/usr/local/bin/extract-machine-os.sh b/data/data/bootstrap/baremetal/files/usr/local/bin/extract-machine-os.sh deleted file mode 100644 index a903d2b872f..00000000000 --- a/data/data/bootstrap/baremetal/files/usr/local/bin/extract-machine-os.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -set -eu - -CID_FILE="$1" - -while ! 
podman run --rm --name machine-os-extractor --cidfile="${CID_FILE}" --cgroups=no-conmon --log-driver=passthrough --env IP_OPTIONS="${PROVISIONING_IP_OPTIONS}" -v systemd-ironic:/shared:z "${MACHINE_OS_IMAGES_IMAGE}" /bin/copy-metal --all /shared/html/images/; do - sleep 5 -done diff --git a/data/data/bootstrap/baremetal/systemd/units/extract-machine-os.service b/data/data/bootstrap/baremetal/systemd/units/extract-machine-os.service deleted file mode 100644 index a104648d38d..00000000000 --- a/data/data/bootstrap/baremetal/systemd/units/extract-machine-os.service +++ /dev/null @@ -1,27 +0,0 @@ -[Unit] -Description=Extract Machine OS Images -BindsTo=ironic-volume.service -Requires=build-ironic-env.service -Wants=crio.service -After=crio.service build-ironic-env.service ironic-volume.service -# Do not restart network interface while running -After=provisioning-interface.service -# The rootfs file must be created before the httpd container is started -# otherwise the kernel params in inspector.ipxe will not be set correctly -Before=ironic-httpd.service - -[Service] -Environment=PODMAN_SYSTEMD_UNIT=%n -EnvironmentFile=/etc/ironic.env -ExecStart=/usr/local/bin/extract-machine-os.sh %t/%N.cid -TimeoutStartSec=20m -ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%N.cid -ExecStopPost=/usr/bin/podman rm --ignore --cidfile=%t/%N.cid -ExecStopPost=-/bin/rm -f %t/%N.cid -Type=oneshot -RemainAfterExit=true -Restart=on-failure -RestartSec=10 - -[Install] -WantedBy=multi-user.target diff --git a/pkg/infrastructure/baremetal/bootstrap.go b/pkg/infrastructure/baremetal/bootstrap.go index d1f4d9f9cff..c4d24eb5460 100644 --- a/pkg/infrastructure/baremetal/bootstrap.go +++ b/pkg/infrastructure/baremetal/bootstrap.go @@ -57,7 +57,7 @@ func newDomain(name string) libvirtxml.Domain { Mode: "host-passthrough", }, Memory: &libvirtxml.DomainMemory{ - Value: 20, + Value: 18, Unit: "GiB", }, VCPU: &libvirtxml.DomainVCPU{ From 34fdaf8401a1a560f87b034fc4dfe6b28537d626 Mon Sep 17 00:00:00 
2001 From: Zane Bitter Date: Mon, 30 Jun 2025 17:42:37 +1200 Subject: [PATCH 08/14] Create scratch volume for /var We have almost 9GiB of data to store in /var (mostly container images in the crio storage), and since only 50% of RAM is used for the tmpfs this requires an inordinate amount of RAM. Create a volume for mounting as /var so that none of this data ends up in the tmpfs. This allows us to return to the original RAM allocation of 6GiB. Avoid creating a new tmpfs for node-image-pull.service to pull the ostree repo, and instead let it land on disk. This is added to the ignition during the creation of the VM, so other use cases for the baremetal bootstrap ignition (e.g. assisted installer) cannot be affected. --- .../systemd/system/var-ostreecontainer.mount | 1 + pkg/infrastructure/baremetal/bootstrap.go | 138 +++++++++++++++++- 2 files changed, 134 insertions(+), 5 deletions(-) diff --git a/data/data/bootstrap/files/etc/systemd/system/var-ostreecontainer.mount b/data/data/bootstrap/files/etc/systemd/system/var-ostreecontainer.mount index ed025f36fe2..b2e3b49339a 100644 --- a/data/data/bootstrap/files/etc/systemd/system/var-ostreecontainer.mount +++ b/data/data/bootstrap/files/etc/systemd/system/var-ostreecontainer.mount @@ -2,6 +2,7 @@ Requires=run-ephemeral.mount After=run-ephemeral.mount ConditionPathExists=/run/ostree-live +ConditionPathExists=!/etc/no-var-tmpfs [Mount] What=tmpfs diff --git a/pkg/infrastructure/baremetal/bootstrap.go b/pkg/infrastructure/baremetal/bootstrap.go index c4d24eb5460..aa084f3ccd6 100644 --- a/pkg/infrastructure/baremetal/bootstrap.go +++ b/pkg/infrastructure/baremetal/bootstrap.go @@ -2,6 +2,7 @@ package baremetal import ( "context" + "encoding/json" "encoding/xml" "errors" "fmt" @@ -10,11 +11,13 @@ import ( "os" "strings" + igntypes "github.com/coreos/ignition/v2/config/v3_2/types" "github.com/digitalocean/go-libvirt" "github.com/sirupsen/logrus" "libvirt.org/go/libvirtxml" 
"github.com/openshift/assisted-image-service/pkg/isoeditor" + "github.com/openshift/installer/pkg/asset/ignition" "github.com/openshift/installer/pkg/asset/rhcos" ) @@ -57,7 +60,7 @@ func newDomain(name string) libvirtxml.Domain { Mode: "host-passthrough", }, Memory: &libvirtxml.DomainMemory{ - Value: 18, + Value: 6, Unit: "GiB", }, VCPU: &libvirtxml.DomainVCPU{ @@ -166,6 +169,60 @@ func getLiveISO(config baremetalConfig, arch string) (string, error) { return fetcher.GetBaseISOFilename(context.Background(), arch) } +func bootstrapIgnition(config baremetalConfig) (*isoeditor.IgnitionContent, error) { + ign := &igntypes.Config{} + // TODO(zaneb): Put swap config into system ignition rather than modifying user ignition + if err := json.Unmarshal([]byte(config.IgnitionBootstrap), &ign); err != nil { + return nil, fmt.Errorf("failed to unmarshal bootstrap Ignition config: %w", err) + } + + fsLabel := "var" + partDev := fmt.Sprintf("/dev/disk/by-partlabel/%s", fsLabel) + format := "xfs" + path := "/var" + ign.Storage.Disks = append(ign.Storage.Disks, igntypes.Disk{ + Device: "/dev/vda", + Partitions: []igntypes.Partition{ + { + Number: 1, + Label: &fsLabel, + }, + }, + }) + ign.Storage.Filesystems = append(ign.Storage.Filesystems, igntypes.Filesystem{ + Device: partDev, + Label: &fsLabel, + Format: &format, + Path: &path, + }) + systemdPartDev := strings.ReplaceAll(strings.ReplaceAll(strings.TrimLeft(partDev, "/"), "-", "\\x2d"), "/", "-") + enabled := true + mountUnit := fmt.Sprintf(`[Unit] +Requires=systemd-fsck@%s.service +After=systemd-fsck@%s.service +[Mount] +Where=%s +What=%s +Type=%s + +[Install] +RequiredBy=localfs.target +`, + systemdPartDev, systemdPartDev, path, partDev, format) + ign.Systemd.Units = append(ign.Systemd.Units, igntypes.Unit{ + Name: fmt.Sprintf("%s.mount", fsLabel), + Contents: &mountUnit, + Enabled: &enabled, + }) + ign.Storage.Files = append(ign.Storage.Files, ignition.FileFromString("/etc/no-var-tmpfs", "root", 0o440, "")) + + ignData, err 
:= ignition.Marshal(ign) + if err != nil { + return nil, fmt.Errorf("failed to marshal bootstrap Ignition config: %w", err) + } + return &isoeditor.IgnitionContent{Config: ignData}, nil +} + func createLiveVolume(virConn *libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool) (libvirt.StorageVol, error) { capabilities, err := getHostCapabilities(virConn) if err != nil { @@ -178,13 +235,17 @@ func createLiveVolume(virConn *libvirt.Libvirt, config baremetalConfig, pool lib } defer os.Remove(isoFile) + ignition, err := bootstrapIgnition(config) + if err != nil { + return libvirt.StorageVol{}, err + } var kargs string if config.FIPS { kargs += " fips=1" } stream, err := isoeditor.NewRHCOSStreamReader( isoFile, - &isoeditor.IgnitionContent{Config: []byte(config.IgnitionBootstrap)}, + ignition, nil, []byte(kargs), ) @@ -234,6 +295,35 @@ func createLiveVolume(virConn *libvirt.Libvirt, config baremetalConfig, pool lib return liveVolume, nil } +func createScratchVolume(virConn *libvirt.Libvirt, clusterID string, pool libvirt.StoragePool) (libvirt.StorageVol, error) { + vol := libvirtxml.StorageVolume{ + Name: fmt.Sprintf("%s-scratch", clusterID), + Target: &libvirtxml.StorageVolumeTarget{ + Format: &libvirtxml.StorageVolumeTargetFormat{ + Type: "qcow2", + }, + Permissions: &libvirtxml.StorageVolumeTargetPermissions{ + Mode: "644", + }, + }, + Capacity: &libvirtxml.StorageVolumeSize{ + Unit: "GiB", + Value: 20, + }, + } + scratchVolumeXML, err := xml.Marshal(vol) + if err != nil { + return libvirt.StorageVol{}, err + } + + scratchVolume, err := virConn.StorageVolCreateXML(pool, string(scratchVolumeXML), 0) + if err != nil { + return libvirt.StorageVol{}, err + } + + return scratchVolume, nil +} + func getHostCapabilities(virConn *libvirt.Libvirt) (libvirtxml.Caps, error) { var caps libvirtxml.Caps @@ -250,7 +340,7 @@ func getHostCapabilities(virConn *libvirt.Libvirt) (libvirtxml.Caps, error) { return caps, nil } -func createBootstrapDomain(virConn 
*libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool, liveCDVolume libvirt.StorageVol) error { +func createBootstrapDomain(virConn *libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool, liveCDVolume, scratchVolume libvirt.StorageVol) error { bootstrapDom := newDomain(fmt.Sprintf("%s-bootstrap", config.ClusterID)) capabilities, err := getHostCapabilities(virConn) @@ -306,9 +396,30 @@ func createBootstrapDomain(virConn *libvirt.Libvirt, config baremetalConfig, poo Volume: liveCDVolume.Name, }, }, + Boot: &libvirtxml.DomainDeviceBoot{ + Order: 1, + }, + } + + scratchDisk := libvirtxml.DomainDisk{ + Device: "disk", + Target: &libvirtxml.DomainDiskTarget{ + Bus: "virtio", + Dev: "vda", + }, + Driver: &libvirtxml.DomainDiskDriver{ + Name: "qemu", + Type: "qcow2", + }, + Source: &libvirtxml.DomainDiskSource{ + Volume: &libvirtxml.DomainDiskSourceVolume{ + Pool: pool.Name, + Volume: scratchVolume.Name, + }, + }, } - bootstrapDom.Devices.Disks = append(bootstrapDom.Devices.Disks, liveCD) + bootstrapDom.Devices.Disks = append(bootstrapDom.Devices.Disks, liveCD, scratchDisk) bootstrapDom.Resource = nil @@ -354,8 +465,14 @@ func createBootstrap(config baremetalConfig) error { return err } + logrus.Debug(" Creating scratch volume") + scratchVolume, err := createScratchVolume(virConn, config.ClusterID, pool) + if err != nil { + return err + } + logrus.Debug(" Creating bootstrap domain") - err = createBootstrapDomain(virConn, config, pool, liveVolume) + err = createBootstrapDomain(virConn, config, pool, liveVolume, scratchVolume) if err != nil { return err } @@ -424,6 +541,17 @@ func destroyBootstrap(config baremetalConfig) error { return err } + vol, err = virConn.StorageVolLookupByName(pool, fmt.Sprintf("%s-scratch", config.ClusterID)) + if err != nil { + return err + } + + logrus.Debug(" Deleting scratch volume") + err = virConn.StorageVolDelete(vol, libvirt.StorageVolDeleteNormal) + if err != nil { + return err + } + logrus.Debug(" Destroying pool") 
err = virConn.StoragePoolDestroy(pool) if err != nil { From 651e843d2d01d5e37fe8e9d6dece7f364a513bf7 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Wed, 2 Jul 2025 10:33:52 +1200 Subject: [PATCH 09/14] Mark baremetal.platform.clusterOSImage field as deprecated This has not been required since 4.10, as we now can deploy the RHCOS live ISO to disk directly, and there is no need for a separate qcow image. --- data/data/install.openshift.io_installconfigs.yaml | 2 ++ pkg/asset/agent/installconfig_test.go | 2 +- pkg/asset/ignition/bootstrap/baremetal/template.go | 4 ---- pkg/asset/rhcos/image.go | 2 +- pkg/types/baremetal/platform.go | 4 +++- pkg/types/baremetal/validation/platform.go | 2 +- pkg/types/baremetal/validation/platform_test.go | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/data/data/install.openshift.io_installconfigs.yaml b/data/data/install.openshift.io_installconfigs.yaml index b287fd6f861..56e14e49057 100644 --- a/data/data/install.openshift.io_installconfigs.yaml +++ b/data/data/install.openshift.io_installconfigs.yaml @@ -5870,6 +5870,8 @@ spec: ClusterOSImage is a URL to override the default OS image for cluster nodes. The URL must contain a sha256 hash of the image e.g https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8... + Deprecated: This is no longer required, the OS image is now part of the + OpenShift release. 
type: string clusterProvisioningIP: description: |- diff --git a/pkg/asset/agent/installconfig_test.go b/pkg/asset/agent/installconfig_test.go index c988c0e7a98..caab408682b 100644 --- a/pkg/asset/agent/installconfig_test.go +++ b/pkg/asset/agent/installconfig_test.go @@ -1522,7 +1522,7 @@ pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"c3VwZXItc2VjcmV0Cg==\"}}}" IngressVIPs: []string{"192.168.122.11"}, DNSRecordsType: configv1.DNSRecordsTypeInternal, BootstrapOSImage: "https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd", - ClusterOSImage: "https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8", + DeprecatedClusterOSImage: "https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8", BootstrapExternalStaticIP: "192.1168.122.50", BootstrapExternalStaticGateway: "gateway", AdditionalNTPServers: []string{"10.0.1.1", "10.0.1.2"}, diff --git a/pkg/asset/ignition/bootstrap/baremetal/template.go b/pkg/asset/ignition/bootstrap/baremetal/template.go index cae7572ceb6..98810b436e7 100644 --- a/pkg/asset/ignition/bootstrap/baremetal/template.go +++ b/pkg/asset/ignition/bootstrap/baremetal/template.go @@ -53,9 +53,6 @@ type TemplateData struct { // BaremetalIntrospectionEndpointOverride contains the url for the baremetal introspection endpoint BaremetalIntrospectionEndpointOverride string - // ClusterOSImage contains 4 URLs to download RHCOS live iso, kernel, rootfs and initramfs - ClusterOSImage string - // API VIP for use by ironic during bootstrap. 
APIVIPs []string @@ -230,7 +227,6 @@ func GetTemplateData(config *baremetal.Platform, networks []types.MachineNetwork templateData.IronicUsername = ironicUsername templateData.IronicPassword = ironicPassword - templateData.ClusterOSImage = config.ClusterOSImage return &templateData } diff --git a/pkg/asset/rhcos/image.go b/pkg/asset/rhcos/image.go index aeef055a389..f062b87e4a3 100644 --- a/pkg/asset/rhcos/image.go +++ b/pkg/asset/rhcos/image.go @@ -170,7 +170,7 @@ func osImage(ctx context.Context, ic *installconfig.InstallConfig, machinePool * return azi, nil case baremetal.Name: // Check for image URL override - if oi := platform.BareMetal.ClusterOSImage; oi != "" { + if oi := platform.BareMetal.DeprecatedClusterOSImage; oi != "" { return oi, nil } // Use image from release payload diff --git a/pkg/types/baremetal/platform.go b/pkg/types/baremetal/platform.go index 712a521cb50..73ae8a8ce1a 100644 --- a/pkg/types/baremetal/platform.go +++ b/pkg/types/baremetal/platform.go @@ -216,9 +216,11 @@ type Platform struct { // ClusterOSImage is a URL to override the default OS image // for cluster nodes. The URL must contain a sha256 hash of the image // e.g https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8... + // Deprecated: This is no longer required, the OS image is now part of the + // OpenShift release. // // +optional - ClusterOSImage string `json:"clusterOSImage,omitempty"` + DeprecatedClusterOSImage string `json:"clusterOSImage,omitempty"` // BootstrapExternalStaticIP is the static IP address of the bootstrap node. // This can be useful in environments without a DHCP server. 
diff --git a/pkg/types/baremetal/validation/platform.go b/pkg/types/baremetal/validation/platform.go index e765b9023f2..eaa2983f6ae 100644 --- a/pkg/types/baremetal/validation/platform.go +++ b/pkg/types/baremetal/validation/platform.go @@ -206,7 +206,7 @@ func validateOSImages(p *baremetal.Platform, fldPath *field.Path) field.ErrorLis fields := map[string]string{ "bootstrapOSImage": p.BootstrapOSImage, - "clusterOSImage": p.ClusterOSImage, + "clusterOSImage": p.DeprecatedClusterOSImage, } for fieldName, url := range fields { diff --git a/pkg/types/baremetal/validation/platform_test.go b/pkg/types/baremetal/validation/platform_test.go index 817c8d0cb3e..17a9b08aa83 100644 --- a/pkg/types/baremetal/validation/platform_test.go +++ b/pkg/types/baremetal/validation/platform_test.go @@ -958,7 +958,7 @@ func (pb *platformBuilder) BootstrapOSImage(value string) *platformBuilder { } func (pb *platformBuilder) ClusterOSImage(value string) *platformBuilder { - pb.Platform.ClusterOSImage = value + pb.Platform.DeprecatedClusterOSImage = value return pb } From eb7b5a024ac5800a677283ecb51505088a60fa2f Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Tue, 1 Jul 2025 23:45:15 +1200 Subject: [PATCH 10/14] Deprecate platform.baremetal.bootstrapOSImage field This field is neither required nor used if it is provided. 
--- data/data/install.openshift.io_installconfigs.yaml | 1 + pkg/asset/agent/installconfig.go | 5 ----- pkg/asset/agent/installconfig_test.go | 2 +- pkg/asset/rhcos/bootstrap_image.go | 5 +---- pkg/types/baremetal/platform.go | 3 ++- pkg/types/baremetal/validation/platform.go | 10 +++++----- pkg/types/baremetal/validation/platform_test.go | 2 +- 7 files changed, 11 insertions(+), 17 deletions(-) diff --git a/data/data/install.openshift.io_installconfigs.yaml b/data/data/install.openshift.io_installconfigs.yaml index 56e14e49057..b3eb531e5f0 100644 --- a/data/data/install.openshift.io_installconfigs.yaml +++ b/data/data/install.openshift.io_installconfigs.yaml @@ -5857,6 +5857,7 @@ spec: BootstrapOSImage is a URL to override the default OS image for the bootstrap node. The URL must contain a sha256 hash of the image e.g https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd... + Deprecated: This is no longer used. type: string bootstrapProvisioningIP: description: |- diff --git a/pkg/asset/agent/installconfig.go b/pkg/asset/agent/installconfig.go index a876a5b5bb8..2bff2e3ed01 100644 --- a/pkg/asset/agent/installconfig.go +++ b/pkg/asset/agent/installconfig.go @@ -523,11 +523,6 @@ func warnUnusedConfig(installConfig *types.InstallConfig) { fieldPath := bmPath.Child("defaultMachinePlatform") logrus.Warnf("%s: %s is ignored", fieldPath, baremetal.DefaultMachinePlatform) } - if baremetal.BootstrapOSImage != "" { - fieldPath := bmPath.Child("bootstrapOSImage") - logrus.Debugf("%s: %s is ignored", fieldPath, baremetal.BootstrapOSImage) - } - // ClusterOSImage is ignored even in IPI now, so we probably don't need to check it at all. 
if baremetal.BootstrapExternalStaticIP != "" { fieldPath := bmPath.Child("bootstrapExternalStaticIP") diff --git a/pkg/asset/agent/installconfig_test.go b/pkg/asset/agent/installconfig_test.go index caab408682b..d02e720c270 100644 --- a/pkg/asset/agent/installconfig_test.go +++ b/pkg/asset/agent/installconfig_test.go @@ -1521,7 +1521,7 @@ pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"c3VwZXItc2VjcmV0Cg==\"}}}" DeprecatedIngressVIP: "192.168.122.11", IngressVIPs: []string{"192.168.122.11"}, DNSRecordsType: configv1.DNSRecordsTypeInternal, - BootstrapOSImage: "https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd", + DeprecatedBootstrapOSImage: "https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd", DeprecatedClusterOSImage: "https://mirror.example.com/images/metal.qcow2.gz?sha256=3b5a8", BootstrapExternalStaticIP: "192.1168.122.50", BootstrapExternalStaticGateway: "gateway", diff --git a/pkg/asset/rhcos/bootstrap_image.go b/pkg/asset/rhcos/bootstrap_image.go index 58373799b24..1f17c87728d 100644 --- a/pkg/asset/rhcos/bootstrap_image.go +++ b/pkg/asset/rhcos/bootstrap_image.go @@ -39,10 +39,7 @@ func (i *BootstrapImage) Generate(ctx context.Context, p asset.Parents) error { switch config.Platform.Name() { case baremetal.Name: - // Check for CoreOS image URL override - if boi := config.Platform.BareMetal.BootstrapOSImage; boi != "" { - *i = BootstrapImage(boi) - } + // Use image from release payload return nil default: // other platforms use the same image for all nodes diff --git a/pkg/types/baremetal/platform.go b/pkg/types/baremetal/platform.go index 73ae8a8ce1a..6a3536c7bbe 100644 --- a/pkg/types/baremetal/platform.go +++ b/pkg/types/baremetal/platform.go @@ -209,9 +209,10 @@ type Platform struct { // BootstrapOSImage is a URL to override the default OS image // for the bootstrap node. The URL must contain a sha256 hash of the image // e.g https://mirror.example.com/images/qemu.qcow2.gz?sha256=a07bd... + // Deprecated: This is no longer used. 
// // +optional - BootstrapOSImage string `json:"bootstrapOSImage,omitempty"` + DeprecatedBootstrapOSImage string `json:"bootstrapOSImage,omitempty"` // ClusterOSImage is a URL to override the default OS image // for cluster nodes. The URL must contain a sha256 hash of the image diff --git a/pkg/types/baremetal/validation/platform.go b/pkg/types/baremetal/validation/platform.go index eaa2983f6ae..d69e15204fb 100644 --- a/pkg/types/baremetal/validation/platform.go +++ b/pkg/types/baremetal/validation/platform.go @@ -205,7 +205,7 @@ func validateOSImages(p *baremetal.Platform, fldPath *field.Path) field.ErrorLis var errs field.ErrorList fields := map[string]string{ - "bootstrapOSImage": p.BootstrapOSImage, + "bootstrapOSImage": p.DeprecatedBootstrapOSImage, "clusterOSImage": p.DeprecatedClusterOSImage, } @@ -213,12 +213,12 @@ func validateOSImages(p *baremetal.Platform, fldPath *field.Path) field.ErrorLis if url == "" { continue } + path := fldPath.Child(fieldName) + logrus.Infof("%s is no longer required", path.String()) if err := validateOSImageURI(url); err != nil { - errs = append(errs, - field.Invalid(fldPath.Child(fieldName), url, err.Error())) + errs = append(errs, field.Invalid(path, url, err.Error())) } else if res, err := http.Head(url); err != nil || res.StatusCode != http.StatusOK /* #nosec G107 */ { - errs = append(errs, - field.NotFound(fldPath.Child(fieldName), url)) + errs = append(errs, field.NotFound(path, url)) } } return errs diff --git a/pkg/types/baremetal/validation/platform_test.go b/pkg/types/baremetal/validation/platform_test.go index 17a9b08aa83..3b86a0a64c5 100644 --- a/pkg/types/baremetal/validation/platform_test.go +++ b/pkg/types/baremetal/validation/platform_test.go @@ -953,7 +953,7 @@ func (pb *platformBuilder) BootstrapProvisioningIP(value string) *platformBuilde } func (pb *platformBuilder) BootstrapOSImage(value string) *platformBuilder { - pb.Platform.BootstrapOSImage = value + pb.Platform.DeprecatedBootstrapOSImage = value return 
pb } From ed770170d2ffa92fe9aa53355fce80284c1fd09c Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Mon, 7 Apr 2025 16:04:19 +1200 Subject: [PATCH 11/14] Clean up setup-image-data.sh --- .../bin/{setup-image-data.sh.template => setup-image-data.sh} | 2 -- 1 file changed, 2 deletions(-) rename data/data/bootstrap/baremetal/files/usr/local/bin/{setup-image-data.sh.template => setup-image-data.sh} (73%) diff --git a/data/data/bootstrap/baremetal/files/usr/local/bin/setup-image-data.sh.template b/data/data/bootstrap/baremetal/files/usr/local/bin/setup-image-data.sh similarity index 73% rename from data/data/bootstrap/baremetal/files/usr/local/bin/setup-image-data.sh.template rename to data/data/bootstrap/baremetal/files/usr/local/bin/setup-image-data.sh index bec88f24d18..08777af87e5 100644 --- a/data/data/bootstrap/baremetal/files/usr/local/bin/setup-image-data.sh.template +++ b/data/data/bootstrap/baremetal/files/usr/local/bin/setup-image-data.sh @@ -2,7 +2,5 @@ set -euo pipefail -export KUBECONFIG=/opt/openshift/auth/kubeconfig-loopback - # Create a podman secret for the image-customization-server base64 -w 0 /root/.docker/config.json | podman secret create pull-secret - From c5a583335a0f0c7dd3b545af5429cff93b4c72ed Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Mon, 7 Apr 2025 23:33:25 +1200 Subject: [PATCH 12/14] Avoid reporting error from node-image-finish.service Doing systemctl isolate results in killing node-image-finish.service itself, which is thereafter reported as a failed unit. Stop the unit without killing the command so that this does not show up as a failure. 
--- .../bootstrap/files/etc/systemd/system/node-image-finish.service | 1 + 1 file changed, 1 insertion(+) diff --git a/data/data/bootstrap/files/etc/systemd/system/node-image-finish.service b/data/data/bootstrap/files/etc/systemd/system/node-image-finish.service index 33d6960f824..eb8c55cdbb4 100644 --- a/data/data/bootstrap/files/etc/systemd/system/node-image-finish.service +++ b/data/data/bootstrap/files/etc/systemd/system/node-image-finish.service @@ -11,3 +11,4 @@ Type=oneshot # and now, back to our regularly scheduled programming... ExecStart=/usr/bin/echo "Node image overlay complete; switching back to multi-user.target" ExecStart=/usr/bin/systemctl --no-block isolate multi-user.target +KillMode=none From 0d16ebab414427e66274c17ca71e7cd4dd7954ec Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Mon, 22 Sep 2025 12:03:36 +1200 Subject: [PATCH 13/14] Update assisted-image-service library --- go.mod | 9 +- go.sum | 17 +- .../github.com/diskfs/go-diskfs/.golangci.yml | 4 - vendor/github.com/diskfs/go-diskfs/Makefile | 2 +- vendor/github.com/diskfs/go-diskfs/README.md | 45 +- .../github.com/diskfs/go-diskfs/disk/disk.go | 18 + .../diskfs/go-diskfs/disk/disk_wasip1.go | 10 + vendor/github.com/diskfs/go-diskfs/diskfs.go | 6 +- .../diskfs/go-diskfs/diskfs_other.go | 4 +- .../go-diskfs/filesystem/ext4/blockgroup.go | 55 + .../go-diskfs/filesystem/ext4/checksum.go | 50 + .../go-diskfs/filesystem/ext4/consts.go | 5 + .../go-diskfs/filesystem/ext4/crc/crc16.go | 44 + .../go-diskfs/filesystem/ext4/crc/crc32.go | 74 + .../go-diskfs/filesystem/ext4/directory.go | 211 + .../filesystem/ext4/directoryentry.go | 176 + .../go-diskfs/filesystem/ext4/dirhash.go | 157 + .../diskfs/go-diskfs/filesystem/ext4/ext4.go | 1748 +++++++ .../diskfs/go-diskfs/filesystem/ext4/ext4.md | 335 ++ .../go-diskfs/filesystem/ext4/extent.go | 733 +++ .../go-diskfs/filesystem/ext4/features.go | 451 ++ .../diskfs/go-diskfs/filesystem/ext4/file.go | 208 + .../go-diskfs/filesystem/ext4/fileinfo.go | 48 + 
.../filesystem/ext4/groupdescriptors.go | 327 ++ .../diskfs/go-diskfs/filesystem/ext4/inode.go | 588 +++ .../filesystem/ext4/journaldevice_other.go | 12 + .../filesystem/ext4/journaldevice_shared.go | 40 + .../filesystem/ext4/journaldevice_windows.go | 11 + .../go-diskfs/filesystem/ext4/md4/md4.go | 73 + .../go-diskfs/filesystem/ext4/miscflags.go | 34 + .../go-diskfs/filesystem/ext4/mountoptions.go | 182 + .../go-diskfs/filesystem/ext4/superblock.go | 768 +++ .../diskfs/go-diskfs/filesystem/ext4/util.go | 106 + .../go-diskfs/filesystem/fat32/fat32.go | 34 +- .../diskfs/go-diskfs/filesystem/filesystem.go | 2 + .../filesystem/iso9660/directoryentry.go | 51 +- .../directoryentrysystemuseextension.go | 2 +- .../go-diskfs/filesystem/iso9660/eltorito.go | 12 +- .../go-diskfs/filesystem/iso9660/file.go | 2 +- .../go-diskfs/filesystem/iso9660/finalize.go | 259 +- .../go-diskfs/filesystem/iso9660/iso9660.go | 56 +- .../go-diskfs/filesystem/iso9660/rockridge.go | 51 +- .../filesystem/squashfs/compressor.go | 34 +- .../filesystem/squashfs/directory.go | 13 +- .../filesystem/squashfs/directoryentry.go | 175 +- .../go-diskfs/filesystem/squashfs/file.go | 86 +- .../go-diskfs/filesystem/squashfs/finalize.go | 29 +- .../filesystem/squashfs/finalize_wasip1.go | 18 + .../go-diskfs/filesystem/squashfs/inode.go | 23 +- .../go-diskfs/filesystem/squashfs/lru.go | 138 + .../filesystem/squashfs/metadatablock.go | 59 +- .../go-diskfs/filesystem/squashfs/squashfs.go | 174 +- .../go-diskfs/filesystem/squashfs/xattr.go | 4 +- .../go-diskfs/partition/gpt/partition.go | 11 + .../diskfs/go-diskfs/partition/gpt/table.go | 153 +- .../go-diskfs/partition/mbr/partition.go | 10 +- .../diskfs/go-diskfs/partition/mbr/table.go | 62 +- .../go-diskfs/partition/part/partition.go | 1 + .../diskfs/go-diskfs/partition/table.go | 3 + .../diskfs/go-diskfs/util/bitmap.go | 171 + .../diskfs/go-diskfs/util/uniqify.go | 13 + .../djherbis/times}/LICENSE | 0 .../djherbis/times}/README.md | 11 +- 
.../djherbis/times}/ctime_windows.go | 0 .../djherbis/times}/js.cover.dockerfile | 4 +- .../djherbis/times}/js.cover.sh | 0 .../djherbis/times/linux.cover.dockerfile | 6 + .../github.com/djherbis/times/linux.cover.sh | 7 + .../djherbis/times}/times.go | 0 .../djherbis/times}/times_aix.go | 0 .../djherbis/times}/times_darwin.go | 0 .../djherbis/times}/times_dragonfly.go | 0 .../djherbis/times}/times_freebsd.go | 0 .../djherbis/times}/times_js.go | 0 .../github.com/djherbis/times/times_linux.go | 185 + .../djherbis/times}/times_nacl.go | 0 .../djherbis/times}/times_netbsd.go | 0 .../djherbis/times}/times_openbsd.go | 0 .../djherbis/times}/times_plan9.go | 0 .../djherbis/times}/times_solaris.go | 0 .../djherbis/times/times_wasip1.go} | 15 +- .../djherbis/times}/times_windows.go | 0 .../djherbis/times}/use_generic_stat.go | 11 +- .../klauspost/compress/.gitattributes | 2 + .../github.com/klauspost/compress/.gitignore | 32 + .../klauspost/compress/.goreleaser.yml | 123 + vendor/github.com/klauspost/compress/LICENSE | 304 ++ .../github.com/klauspost/compress/README.md | 671 +++ .../github.com/klauspost/compress/SECURITY.md | 25 + .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 167 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 683 +++ .../klauspost/compress/fse/decompress.go | 376 ++ .../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 224 + .../klauspost/compress/huff0/bitwriter.go | 102 + .../klauspost/compress/huff0/compress.go | 742 +++ .../klauspost/compress/huff0/decompress.go | 1167 +++++ .../compress/huff0/decompress_amd64.go | 226 + .../compress/huff0/decompress_amd64.s | 830 ++++ 
.../compress/huff0/decompress_generic.go | 299 ++ .../klauspost/compress/huff0/huff0.go | 337 ++ .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../klauspost/compress/internal/le/le.go | 5 + .../compress/internal/le/unsafe_disabled.go | 42 + .../compress/internal/le/unsafe_enabled.go | 55 + .../compress/internal/snapref/LICENSE | 27 + .../compress/internal/snapref/decode.go | 264 ++ .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 289 ++ .../compress/internal/snapref/encode_other.go | 250 + .../compress/internal/snapref/snappy.go | 98 + vendor/github.com/klauspost/compress/s2sx.mod | 3 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 ++ .../klauspost/compress/zstd/bitreader.go | 135 + .../klauspost/compress/zstd/bitwriter.go | 112 + .../klauspost/compress/zstd/blockdec.go | 712 +++ .../klauspost/compress/zstd/blockenc.go | 892 ++++ .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 131 + .../klauspost/compress/zstd/bytereader.go | 82 + .../klauspost/compress/zstd/decodeheader.go | 261 ++ .../klauspost/compress/zstd/decoder.go | 949 ++++ .../compress/zstd/decoder_options.go | 169 + .../klauspost/compress/zstd/dict.go | 565 +++ .../klauspost/compress/zstd/enc_base.go | 173 + .../klauspost/compress/zstd/enc_best.go | 560 +++ .../klauspost/compress/zstd/enc_better.go | 1252 +++++ .../klauspost/compress/zstd/enc_dfast.go | 1123 +++++ .../klauspost/compress/zstd/enc_fast.go | 891 ++++ .../klauspost/compress/zstd/encoder.go | 642 +++ .../compress/zstd/encoder_options.go | 339 ++ .../klauspost/compress/zstd/framedec.go | 415 ++ .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 307 ++ .../compress/zstd/fse_decoder_amd64.go | 65 + .../compress/zstd/fse_decoder_amd64.s | 126 + 
.../compress/zstd/fse_decoder_generic.go | 73 + .../klauspost/compress/zstd/fse_encoder.go | 701 +++ .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 35 + .../klauspost/compress/zstd/history.go | 116 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 71 + .../compress/zstd/internal/xxhash/xxhash.go | 230 + .../zstd/internal/xxhash/xxhash_amd64.s | 210 + .../zstd/internal/xxhash/xxhash_arm64.s | 184 + .../zstd/internal/xxhash/xxhash_asm.go | 16 + .../zstd/internal/xxhash/xxhash_other.go | 76 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/matchlen_amd64.go | 16 + .../klauspost/compress/zstd/matchlen_amd64.s | 66 + .../compress/zstd/matchlen_generic.go | 38 + .../klauspost/compress/zstd/seqdec.go | 503 ++ .../klauspost/compress/zstd/seqdec_amd64.go | 394 ++ .../klauspost/compress/zstd/seqdec_amd64.s | 4151 +++++++++++++++++ .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../klauspost/compress/zstd/seqenc.go | 112 + .../klauspost/compress/zstd/snappy.go | 434 ++ .../github.com/klauspost/compress/zstd/zip.go | 141 + .../klauspost/compress/zstd/zstd.go | 126 + .../internal/common/version.go | 17 + .../pkg/isoeditor/ignition.go | 58 +- .../pkg/isoeditor/isoutil.go | 60 +- .../pkg/isoeditor/mock_editor.go | 8 +- .../pkg/isoeditor/mock_executer.go | 49 + .../pkg/isoeditor/mock_nmstate_handler.go | 49 + .../pkg/isoeditor/nmstate_handler.go | 135 + .../pkg/isoeditor/rhcos.go | 89 +- .../sirupsen/logrus/terminal_check_bsd.go | 2 +- .../sirupsen/logrus/terminal_check_unix.go | 2 + .../sirupsen/logrus/terminal_check_wasi.go | 8 + .../sirupsen/logrus/terminal_check_wasip1.go | 8 + vendor/gopkg.in/djherbis/times.v1/.travis.sh | 28 - vendor/gopkg.in/djherbis/times.v1/.travis.yml | 28 - vendor/modules.txt | 28 +- 185 files changed, 35213 insertions(+), 585 deletions(-) create mode 100644 vendor/github.com/diskfs/go-diskfs/disk/disk_wasip1.go create mode 
100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/blockgroup.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/checksum.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/consts.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc16.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc32.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directory.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directoryentry.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/dirhash.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.md create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/extent.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/features.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/file.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/fileinfo.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/groupdescriptors.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/inode.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_other.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_shared.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_windows.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/md4/md4.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/miscflags.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/mountoptions.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/superblock.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/ext4/util.go create 
mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize_wasip1.go create mode 100644 vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/lru.go create mode 100644 vendor/github.com/diskfs/go-diskfs/util/bitmap.go create mode 100644 vendor/github.com/diskfs/go-diskfs/util/uniqify.go rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/LICENSE (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/README.md (82%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/ctime_windows.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/js.cover.dockerfile (75%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/js.cover.sh (100%) create mode 100644 vendor/github.com/djherbis/times/linux.cover.dockerfile create mode 100644 vendor/github.com/djherbis/times/linux.cover.sh rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_aix.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_darwin.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_dragonfly.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_freebsd.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_js.go (100%) create mode 100644 vendor/github.com/djherbis/times/times_linux.go rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_nacl.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_netbsd.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_openbsd.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_plan9.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => 
github.com/djherbis/times}/times_solaris.go (100%) rename vendor/{gopkg.in/djherbis/times.v1/times_linux.go => github.com/djherbis/times/times_wasip1.go} (60%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/times_windows.go (100%) rename vendor/{gopkg.in/djherbis/times.v1 => github.com/djherbis/times}/use_generic_stat.go (58%) create mode 100644 vendor/github.com/klauspost/compress/.gitattributes create mode 100644 vendor/github.com/klauspost/compress/.gitignore create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s create mode 100644 vendor/github.com/klauspost/compress/internal/le/le.go create mode 100644 vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go create mode 100644 vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 
vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go create mode 100644 vendor/github.com/openshift/assisted-image-service/internal/common/version.go create mode 100644 vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_executer.go create mode 100644 vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_nmstate_handler.go create mode 100644 vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/nmstate_handler.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_wasi.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go delete mode 100644 vendor/gopkg.in/djherbis/times.v1/.travis.sh delete mode 100644 vendor/gopkg.in/djherbis/times.v1/.travis.yml diff --git a/go.mod b/go.mod index fe6c0cb8a68..6f62ec9585e 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( 
github.com/coreos/stream-metadata-go v0.4.10-0.20250806142651-4a7d280a6c7b github.com/daixiang0/gci v0.13.5 github.com/digitalocean/go-libvirt v0.0.0-20240220204746-fcabe97a6eed - github.com/diskfs/go-diskfs v1.4.0 + github.com/diskfs/go-diskfs v1.4.1 github.com/form3tech-oss/jwt-go v3.2.3+incompatible github.com/go-logr/logr v1.4.3 github.com/go-openapi/errors v0.22.1 @@ -74,7 +74,7 @@ require ( github.com/nutanix-cloud-native/prism-go-client v0.5.0 github.com/onsi/gomega v1.38.2 github.com/openshift/api v0.0.0-20260228183123-9b2ee997d297 - github.com/openshift/assisted-image-service v0.0.0-20240607085136-02df2e56dde6 + github.com/openshift/assisted-image-service v0.0.0-20250917153356-4ca9ff81f712 github.com/openshift/assisted-service/api v0.0.0 github.com/openshift/assisted-service/client v0.0.0 github.com/openshift/assisted-service/models v0.0.0 @@ -96,7 +96,7 @@ require ( github.com/prometheus/common v0.67.4 github.com/rogpeppe/go-internal v1.14.1 github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd - github.com/sirupsen/logrus v1.9.3 + github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af github.com/spf13/cobra v1.10.1 github.com/stretchr/testify v1.11.1 github.com/thedevsaddam/retry v0.0.0-20200324223450-9769a859cc6d @@ -230,6 +230,7 @@ require ( github.com/coreos/go-systemd/v22 v22.6.0 // indirect github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/djherbis/times v1.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/elliotwutingfeng/asciiset v0.0.0-20230602022725-51bbb787efab // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect @@ -274,6 +275,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/klauspost/compress v1.18.0 // indirect 
github.com/kr/fs v0.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect @@ -338,7 +340,6 @@ require ( google.golang.org/genproto v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect google.golang.org/protobuf v1.36.10 // indirect - gopkg.in/djherbis/times.v1 v1.3.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/gcfg.v1 v1.2.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index d8a2126e8a4..798361c3c05 100644 --- a/go.sum +++ b/go.sum @@ -347,10 +347,12 @@ github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42 github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= github.com/digitalocean/go-libvirt v0.0.0-20240220204746-fcabe97a6eed h1:pDXysiX24X+SE6MwVcfd5lGE21a4jNH9ZgaF9AyshHY= github.com/digitalocean/go-libvirt v0.0.0-20240220204746-fcabe97a6eed/go.mod h1:isF7ghADfbC01gQx4vZnIOrxXT5RXLG81y+UCb5XSwc= -github.com/diskfs/go-diskfs v1.4.0 h1:MAybY6TPD+fmhY+a2qFhmdvMeIKvCqlgh4QIc1uCmBs= -github.com/diskfs/go-diskfs v1.4.0/go.mod h1:G8cyy+ngM+3yKlqjweMmtqvE+TxsnIo1xumbJX1AeLg= +github.com/diskfs/go-diskfs v1.4.1 h1:iODgkzHLmvXS+1VDztpW53T+dQm8GQzi20y9yUd5UCA= +github.com/diskfs/go-diskfs v1.4.1/go.mod h1:+tOkQs8CMMog6Nvljg8DGIxEXrgL48iyT3OM3IlSz74= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= +github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/go-units v0.5.0 
h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -896,8 +898,8 @@ github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openshift/api v0.0.0-20260228183123-9b2ee997d297 h1:QoHTB3QS859LUGE6NUTg98XiMz6Kzm3svQmo4tmgmlg= github.com/openshift/api v0.0.0-20260228183123-9b2ee997d297/go.mod h1:ZYAxo9t1AALeEotN07tNzIvqqqWSxcZIqMUKnY/xCeQ= -github.com/openshift/assisted-image-service v0.0.0-20240607085136-02df2e56dde6 h1:U6ve+dnHlHhAELoxX+rdFOHVhoaYl0l9qtxwYtsO6C0= -github.com/openshift/assisted-image-service v0.0.0-20240607085136-02df2e56dde6/go.mod h1:o2H5VwQhUD8P6XsK6dRmKpCCJqVvv12KJQZBXmcCXCU= +github.com/openshift/assisted-image-service v0.0.0-20250917153356-4ca9ff81f712 h1:UJVh+I/AWZcOJASGdiLcTXkWB1OYNhS/383DHMcRvCQ= +github.com/openshift/assisted-image-service v0.0.0-20250917153356-4ca9ff81f712/go.mod h1:WGdSeSnK0voEWWwA4ar5eApNjGBLmGTpFurEKw/FXJc= github.com/openshift/assisted-service/api v0.0.0-20250922204150-a52b83145bea h1:YhJ9iHKKT5ooAdVr8qq3BdudhTxP/WF0XYDT5gzi1ak= github.com/openshift/assisted-service/api v0.0.0-20250922204150-a52b83145bea/go.mod h1:wA7MaLcf/KoUl7fhB1bHBdhRBLjWPih90sHpxOV6ZLE= github.com/openshift/assisted-service/client v0.0.0-20250922204150-a52b83145bea h1:nYepkoJZSEjQEadaZ7oZraaeTug0zSV43HISLaHTCF0= @@ -1029,8 +1031,8 @@ github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJV github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af 
h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/sivchari/tenv v1.12.1 h1:+E0QzjktdnExv/wwsnnyk4oqZBUfuh89YMQT1cyuvSY= @@ -1295,6 +1297,7 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1402,8 +1405,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/djherbis/times.v1 v1.3.0 h1:uxMS4iMtH6Pwsxog094W0FYldiNnfY/xba00vq6C2+o= -gopkg.in/djherbis/times.v1 v1.3.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 
h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= diff --git a/vendor/github.com/diskfs/go-diskfs/.golangci.yml b/vendor/github.com/diskfs/go-diskfs/.golangci.yml index 2ec29ec289d..5434216b268 100644 --- a/vendor/github.com/diskfs/go-diskfs/.golangci.yml +++ b/vendor/github.com/diskfs/go-diskfs/.golangci.yml @@ -21,7 +21,6 @@ linters: disable-all: true enable: - bodyclose - - depguard - dogsled - dupl - errcheck @@ -49,9 +48,6 @@ linters: - unconvert - unparam - whitespace - # - wsl # лишние пустые строки и т.д., чистый стиль - # - goconst # проверка на наличие переменных, которых следовало бы вынести в const - # - gomnd # поиск всяких "магических" чисел, переменных run: issues-exit-code: 1 diff --git a/vendor/github.com/diskfs/go-diskfs/Makefile b/vendor/github.com/diskfs/go-diskfs/Makefile index 7aacf67c928..24bbc2f446a 100644 --- a/vendor/github.com/diskfs/go-diskfs/Makefile +++ b/vendor/github.com/diskfs/go-diskfs/Makefile @@ -6,7 +6,7 @@ GOENV ?= GO111MODULE=on CGO_ENABLED=0 GO_FILES ?= $(shell $(GOENV) go list ./...) GOBIN ?= $(shell go env GOPATH)/bin LINTER ?= $(GOBIN)/golangci-lint -LINTER_VERSION ?= v1.51.2 +LINTER_VERSION ?= v1.55.2 # BUILDARCH is the host architecture # ARCH is the target architecture diff --git a/vendor/github.com/diskfs/go-diskfs/README.md b/vendor/github.com/diskfs/go-diskfs/README.md index aa290f7b7a4..e230077b823 100644 --- a/vendor/github.com/diskfs/go-diskfs/README.md +++ b/vendor/github.com/diskfs/go-diskfs/README.md @@ -85,49 +85,7 @@ Some filesystem types are intended to be created once, after which they are read ### Example -There are examples in the [examples/](./examples/) directory. Here is one to get you started. - -The following example will create a fully bootable EFI disk image. It assumes you have a bootable EFI file (any modern Linux kernel compiled with `CONFIG_EFI_STUB=y` will work) available. 
- -```go -import diskfs "github.com/diskfs/go-diskfs" - -espSize int := 100*1024*1024 // 100 MB -diskSize int := espSize + 4*1024*1024 // 104 MB - - -// create a disk image -diskImg := "/tmp/disk.img" -disk := diskfs.Create(diskImg, diskSize, diskfs.Raw, diskfs.SectorSizeDefault) -// create a partition table -blkSize int := 512 -partitionSectors int := espSize / blkSize -partitionStart int := 2048 -partitionEnd int := partitionSectors - partitionStart + 1 -table := PartitionTable{ - type: partition.GPT, - partitions:[ - Partition{Start: partitionStart, End: partitionEnd, Type: partition.EFISystemPartition, Name: "EFI System"} - ] -} -// apply the partition table -err = disk.Partition(table) - - -/* - * create an ESP partition with some contents - */ -kernel, err := os.ReadFile("/some/kernel/file") - -fs, err := disk.CreateFilesystem(0, diskfs.TypeFat32) - -// make our directories -err = fs.Mkdir("/EFI/BOOT") -rw, err := fs.OpenFile("/EFI/BOOT/BOOTX64.EFI", os.O_CREATE|os.O_RDRWR) - -err = rw.Write(kernel) - -``` +There are examples in the [examples/](./examples/) directory. See for example how to [create a fully bootable EFI disk image](./examples/efi_create.go). ## Tests There are two ways to run tests: unit and integration (somewhat loosely defined). @@ -151,7 +109,6 @@ cat $PWD/foo.img | docker run -i --rm $INT_IMAGE mdir -i /file.img /abc Future plans are to add the following: * embed boot code in `mbr` e.g. 
`altmbr.bin` (no need for `gpt` since an ESP with `/EFI/BOOT/BOOT.EFI` will boot) -* `ext4` filesystem * `Joliet` extensions to `iso9660` * `Rock Ridge` sparse file support - supports the flag, but not yet reading or writing * `squashfs` sparse file support - currently treats sparse files as regular files diff --git a/vendor/github.com/diskfs/go-diskfs/disk/disk.go b/vendor/github.com/diskfs/go-diskfs/disk/disk.go index 08c7dfe11fa..796565b864b 100644 --- a/vendor/github.com/diskfs/go-diskfs/disk/disk.go +++ b/vendor/github.com/diskfs/go-diskfs/disk/disk.go @@ -13,6 +13,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/diskfs/go-diskfs/filesystem" + "github.com/diskfs/go-diskfs/filesystem/ext4" "github.com/diskfs/go-diskfs/filesystem/fat32" "github.com/diskfs/go-diskfs/filesystem/iso9660" "github.com/diskfs/go-diskfs/filesystem/squashfs" @@ -185,6 +186,8 @@ func (d *Disk) CreateFilesystem(spec FilesystemSpec) (filesystem.FileSystem, err return fat32.Create(d.File, size, start, d.LogicalBlocksize, spec.VolumeLabel) case filesystem.TypeISO9660: return iso9660.Create(d.File, size, start, d.LogicalBlocksize, spec.WorkDir) + case filesystem.TypeExt4: + return ext4.Create(d.File, size, start, d.LogicalBlocksize, nil) case filesystem.TypeSquashfs: return nil, errors.New("squashfs is a read-only filesystem") default: @@ -244,5 +247,20 @@ func (d *Disk) GetFilesystem(part int) (filesystem.FileSystem, error) { if err == nil { return squashFS, nil } + log.Debug("trying ext4") + ext4FS, err := ext4.Read(d.File, size, start, d.LogicalBlocksize) + if err == nil { + return ext4FS, nil + } + log.Debugf("ext4 failed: %v", err) return nil, fmt.Errorf("unknown filesystem on partition %d", part) } + +// Close the disk. Once successfully closed, it can no longer be used. 
+func (d *Disk) Close() error { + if err := d.File.Close(); err != nil { + return err + } + *d = Disk{} + return nil +} diff --git a/vendor/github.com/diskfs/go-diskfs/disk/disk_wasip1.go b/vendor/github.com/diskfs/go-diskfs/disk/disk_wasip1.go new file mode 100644 index 00000000000..6a290d1999a --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/disk/disk_wasip1.go @@ -0,0 +1,10 @@ +//go:build wasip1 +// +build wasip1 + +package disk + +import "errors" + +func (d *Disk) ReReadPartitionTable() error { + return errors.New("not implemented") +} diff --git a/vendor/github.com/diskfs/go-diskfs/diskfs.go b/vendor/github.com/diskfs/go-diskfs/diskfs.go index f2e39cf2c9e..f0952a09650 100644 --- a/vendor/github.com/diskfs/go-diskfs/diskfs.go +++ b/vendor/github.com/diskfs/go-diskfs/diskfs.go @@ -335,7 +335,7 @@ func Open(device string, opts ...OpenOpt) (*disk.Disk, error) { // Create a Disk from a path to a device // Should pass a path to a block device e.g. /dev/sda or a path to a file /tmp/foo.img // The provided device must not exist at the time you call Create() -func Create(device string, size int64, format Format, sectorSize SectorSize) (*disk.Disk, error) { +func Create(device string, size int64, _ Format, sectorSize SectorSize) (*disk.Disk, error) { if device == "" { return nil, errors.New("must pass device name") } @@ -344,11 +344,11 @@ func Create(device string, size int64, format Format, sectorSize SectorSize) (*d } f, err := os.OpenFile(device, os.O_RDWR|os.O_EXCL|os.O_CREATE, 0o666) if err != nil { - return nil, fmt.Errorf("could not create device %s: %v", device, errors.Unwrap(err)) + return nil, fmt.Errorf("could not create device %s: %w", device, err) } err = os.Truncate(device, size) if err != nil { - return nil, fmt.Errorf("could not expand device %s to size %d: %v", device, size, errors.Unwrap(err)) + return nil, fmt.Errorf("could not expand device %s to size %d: %w", device, size, err) } // return our disk return initDisk(f, ReadWriteExclusive, 
sectorSize) diff --git a/vendor/github.com/diskfs/go-diskfs/diskfs_other.go b/vendor/github.com/diskfs/go-diskfs/diskfs_other.go index 95f1f487f9f..16208aea9cc 100644 --- a/vendor/github.com/diskfs/go-diskfs/diskfs_other.go +++ b/vendor/github.com/diskfs/go-diskfs/diskfs_other.go @@ -8,11 +8,11 @@ import ( ) // getBlockDeviceSize get the size of an opened block device in Bytes. -func getBlockDeviceSize(f *os.File) (int64, error) { +func getBlockDeviceSize(_ *os.File) (int64, error) { return 0, errors.New("block devices not supported on this platform") } // getSectorSizes get the logical and physical sector sizes for a block device -func getSectorSizes(f *os.File) (logicalSectorSize, physicalSectorSize int64, err error) { +func getSectorSizes(_ *os.File) (logicalSectorSize, physicalSectorSize int64, err error) { return 0, 0, errors.New("block devices not supported on this platform") } diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/blockgroup.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/blockgroup.go new file mode 100644 index 00000000000..bf3b426d9af --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/blockgroup.go @@ -0,0 +1,55 @@ +package ext4 + +import ( + "fmt" + + "github.com/diskfs/go-diskfs/util" +) + +// blockGroup is a structure holding the data about a single block group +// +//nolint:unused // will be used in the future, not yet +type blockGroup struct { + inodeBitmap *util.Bitmap + blockBitmap *util.Bitmap + blockSize int + number int + inodeTableSize int + firstDataBlock int +} + +// blockGroupFromBytes create a blockGroup struct from bytes +// it does not load the inode table or data blocks into memory, rather holding pointers to where they are +// +//nolint:unused // will be used in the future, not yet +func blockGroupFromBytes(b []byte, blockSize, groupNumber int) (*blockGroup, error) { + expectedSize := 2 * blockSize + actualSize := len(b) + if actualSize != expectedSize { + return nil, 
fmt.Errorf("expected to be passed %d bytes for 2 blocks of size %d, instead received %d", expectedSize, blockSize, actualSize) + } + inodeBitmap := util.BitmapFromBytes(b[0:blockSize]) + blockBitmap := util.BitmapFromBytes(b[blockSize : 2*blockSize]) + + bg := blockGroup{ + inodeBitmap: inodeBitmap, + blockBitmap: blockBitmap, + number: groupNumber, + blockSize: blockSize, + } + return &bg, nil +} + +// toBytes returns bitmaps ready to be written to disk +// +//nolint:unused // will be used in the future, not yet +func (bg *blockGroup) toBytes() ([]byte, error) { + b := make([]byte, 2*bg.blockSize) + inodeBitmapBytes := bg.inodeBitmap.ToBytes() + blockBitmapBytes := bg.blockBitmap.ToBytes() + + b = append(b, inodeBitmapBytes...) + b = append(b, blockBitmapBytes...) + + return b, nil +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/checksum.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/checksum.go new file mode 100644 index 00000000000..d7ffea43749 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/checksum.go @@ -0,0 +1,50 @@ +package ext4 + +import ( + "encoding/binary" + + "github.com/diskfs/go-diskfs/filesystem/ext4/crc" +) + +// checksumAppender is a function that takes a byte slice and returns a byte slice with a checksum appended +type checksumAppender func([]byte) []byte +type checksummer func([]byte) uint32 + +// directoryChecksummer returns a function that implements checksumAppender for a directory entries block +// original calculations can be seen for e2fsprogs https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/ext2fs/csum.c#n301 +// and in the linux tree https://github.com/torvalds/linux/blob/master/fs/ext4/namei.c#L376-L384 +func directoryChecksummer(seed, inodeNumber, inodeGeneration uint32) checksummer { + numBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(numBytes, inodeNumber) + crcResult := crc.CRC32c(seed, numBytes) + genBytes := make([]byte, 4) + 
binary.LittleEndian.PutUint32(genBytes, inodeGeneration) + crcResult = crc.CRC32c(crcResult, genBytes) + return func(b []byte) uint32 { + checksum := crc.CRC32c(crcResult, b) + return checksum + } +} + +// directoryChecksumAppender returns a function that implements checksumAppender for a directory entries block +// original calculations can be seen for e2fsprogs https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/ext2fs/csum.c#n301 +// and in the linux tree https://github.com/torvalds/linux/blob/master/fs/ext4/namei.c#L376-L384 +// +//nolint:unparam // inodeGeneration is always 0 +func directoryChecksumAppender(seed, inodeNumber, inodeGeneration uint32) checksumAppender { + fn := directoryChecksummer(seed, inodeNumber, inodeGeneration) + return func(b []byte) []byte { + checksum := fn(b) + checksumBytes := make([]byte, 12) + checksumBytes[4] = 12 + checksumBytes[7] = 0xde + binary.LittleEndian.PutUint32(checksumBytes[8:12], checksum) + b = append(b, checksumBytes...) + return b + } +} + +// nullDirectoryChecksummer does not change anything +func nullDirectoryChecksummer(b []byte) []byte { + return b +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/consts.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/consts.go new file mode 100644 index 00000000000..2295aa08264 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/consts.go @@ -0,0 +1,5 @@ +package ext4 + +const ( + maxUint16 uint64 = 1<<16 - 1 +) diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc16.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc16.go new file mode 100644 index 00000000000..b8c37882fa9 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc16.go @@ -0,0 +1,44 @@ +package crc + +var crc16tab = [256]uint16{ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 
0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0} + +func CRC16(crc uint16, bs []byte) uint16 { + l := len(bs) + for i 
:= 0; i < l; i++ { + crc = ((crc << 8) & 0xff00) ^ crc16tab[((crc>>8)&0xff)^uint16(bs[i])] + } + + return crc +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc32.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc32.go new file mode 100644 index 00000000000..70b44b0e33e --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/crc/crc32.go @@ -0,0 +1,74 @@ +package crc + +import ( + "encoding/binary" + "hash/crc32" +) + +// Define the CRC32C table using the Castagnoli polynomial +var ( + crc32cTable = crc32.MakeTable(crc32.Castagnoli) + crc32cTables = generateTables(crc32cTable) +) + +func generateTables(poly *crc32.Table) [8][256]uint32 { + var tab [8][256]uint32 + tab[0] = *poly + + for i := 0; i < 256; i++ { + crc := tab[0][i] + for j := 1; j < 8; j++ { + crc = (crc >> 8) ^ tab[0][crc&0xff] + tab[j][i] = crc + } + } + + return tab +} + +func CRC32c(base uint32, b []byte) uint32 { + // Compute the CRC32C checksum + // for reasons unknown, the checksum from go package hash/crc32, using crc32.Update(), is different from the one calculated by the kernel + // so we use this + return crc32Body(base, b, &crc32cTables) +} + +// doCRC processes a single byte +func doCRC(crc uint32, x byte, tab *[256]uint32) uint32 { + return tab[(crc^uint32(x))&0xff] ^ (crc >> 8) +} + +// doCRC4 processes 4 bytes +func doCRC4(q uint32, tab *[8][256]uint32) uint32 { + return tab[3][q&0xff] ^ tab[2][(q>>8)&0xff] ^ tab[1][(q>>16)&0xff] ^ tab[0][(q>>24)&0xff] +} + +// doCRC8 processes 8 bytes +func doCRC8(q uint32, tab *[8][256]uint32) uint32 { + return tab[7][q&0xff] ^ tab[6][(q>>8)&0xff] ^ tab[5][(q>>16)&0xff] ^ tab[4][(q>>24)&0xff] +} + +func crc32Body(crc uint32, buf []byte, tab *[8][256]uint32) uint32 { + // Align it + for len(buf) > 0 && (uintptr(len(buf))&3) != 0 { + crc = doCRC(crc, buf[0], &tab[0]) + buf = buf[1:] + } + + // Process in chunks of 8 bytes + remLen := len(buf) % 8 + for len(buf) >= 8 { + q := crc ^ 
binary.LittleEndian.Uint32(buf[:4]) + crc = doCRC8(q, tab) + q = binary.LittleEndian.Uint32(buf[4:8]) + crc ^= doCRC4(q, tab) + buf = buf[8:] + } + + // Process remaining bytes + for _, b := range buf[:remLen] { + crc = doCRC(crc, b, &tab[0]) + } + + return crc +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directory.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directory.go new file mode 100644 index 00000000000..24535f0297d --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directory.go @@ -0,0 +1,211 @@ +package ext4 + +import ( + "bytes" + "encoding/binary" + "fmt" +) + +const ( + directoryHashTreeRootMinSize = 0x28 + directoryHashTreeNodeMinSize = 0x12 +) + +// Directory represents a single directory in an ext4 filesystem +type Directory struct { + directoryEntry + root bool + entries []*directoryEntry +} + +// toBytes convert our entries to raw bytes. Provides checksum as well. Final returned byte slice will be a multiple of bytesPerBlock. +func (d *Directory) toBytes(bytesPerBlock uint32, checksumFunc checksumAppender) []byte { + b := make([]byte, 0) + var ( + previousLength int + previousEntry *directoryEntry + lastEntryCount int + block []byte + ) + if len(d.entries) == 0 { + return b + } + lastEntryCount = len(d.entries) - 1 + for i, de := range d.entries { + b2 := de.toBytes(0) + switch { + case len(block)+len(b2) > int(bytesPerBlock)-minDirEntryLength: + // if adding this one will go past the end of the block, pad out the previous + block = block[:len(block)-previousLength] + previousB := previousEntry.toBytes(uint16(int(bytesPerBlock) - len(block) - minDirEntryLength)) + block = append(block, previousB...) + // add the checksum + block = checksumFunc(block) + b = append(b, block...) 
+ // start a new block + block = make([]byte, 0) + case i == lastEntryCount: + // if this is the last one, pad it out + b2 = de.toBytes(uint16(int(bytesPerBlock) - len(block) - minDirEntryLength)) + block = append(block, b2...) + // add the checksum + block = checksumFunc(block) + b = append(b, block...) + // start a new block + block = make([]byte, 0) + default: + block = append(block, b2...) + } + previousLength = len(b2) + previousEntry = de + } + remainder := len(b) % int(bytesPerBlock) + if remainder > 0 { + extra := int(bytesPerBlock) - remainder + zeroes := make([]byte, extra) + b = append(b, zeroes...) + } + return b +} + +type directoryHashEntry struct { + hash uint32 + block uint32 +} + +type dxNode interface { + entries() []directoryHashEntry +} + +type directoryHashNode struct { + childEntries []directoryHashEntry +} + +func (d *directoryHashNode) entries() []directoryHashEntry { + return d.childEntries +} + +type directoryHashRoot struct { + inodeDir uint32 + inodeParent uint32 + hashVersion hashVersion + depth uint8 + hashAlgorithm hashAlgorithm + childEntries []directoryHashEntry + dotEntry *directoryEntry + dotDotEntry *directoryEntry +} + +func (d *directoryHashRoot) entries() []directoryHashEntry { + return d.childEntries +} + +// parseDirectoryTreeRoot parses the directory hash tree root from the given byte slice. Reads only the root node. 
+func parseDirectoryTreeRoot(b []byte, largeDir bool) (node *directoryHashRoot, err error) { + // min size + if len(b) < directoryHashTreeRootMinSize { + return nil, fmt.Errorf("directory hash tree root is too small") + } + + // dot parameters + dotInode := binary.LittleEndian.Uint32(b[0x0:0x4]) + dotSize := binary.LittleEndian.Uint16(b[0x4:0x6]) + if dotSize != 12 { + return nil, fmt.Errorf("directory hash tree root dot size is %d and not 12", dotSize) + } + dotNameSize := b[0x6] + if dotNameSize != 1 { + return nil, fmt.Errorf("directory hash tree root dot name length is %d and not 1", dotNameSize) + } + dotFileType := directoryFileType(b[0x7]) + if dotFileType != dirFileTypeDirectory { + return nil, fmt.Errorf("directory hash tree root dot file type is %d and not %v", dotFileType, dirFileTypeDirectory) + } + dotName := b[0x8:0xc] + if !bytes.Equal(dotName, []byte{'.', 0, 0, 0}) { + return nil, fmt.Errorf("directory hash tree root dot name is %s and not '.'", dotName) + } + + // dotdot parameters + dotdotInode := binary.LittleEndian.Uint32(b[0xc:0x10]) + dotdotNameSize := b[0x12] + if dotdotNameSize != 2 { + return nil, fmt.Errorf("directory hash tree root dotdot name length is %d and not 2", dotdotNameSize) + } + dotdotFileType := directoryFileType(b[0x13]) + if dotdotFileType != dirFileTypeDirectory { + return nil, fmt.Errorf("directory hash tree root dotdot file type is %d and not %v", dotdotFileType, dirFileTypeDirectory) + } + dotdotName := b[0x14:0x18] + if !bytes.Equal(dotdotName, []byte{'.', '.', 0, 0}) { + return nil, fmt.Errorf("directory hash tree root dotdot name is %s and not '..'", dotdotName) + } + + treeInformation := b[0x1d] + if treeInformation != 8 { + return nil, fmt.Errorf("directory hash tree root tree information is %d and not 8", treeInformation) + } + treeDepth := b[0x1e] + // there are maximums for this + maxTreeDepth := uint8(2) + if largeDir { + maxTreeDepth = 3 + } + if treeDepth > maxTreeDepth { + return nil, fmt.Errorf("directory 
hash tree root tree depth is %d and not between 0 and %d", treeDepth, maxTreeDepth) + } + + dxEntriesCount := binary.LittleEndian.Uint16(b[0x22:0x24]) + + node = &directoryHashRoot{ + inodeDir: binary.LittleEndian.Uint32(b[0x0:0x4]), + inodeParent: binary.LittleEndian.Uint32(b[0xC:0x10]), + hashAlgorithm: hashAlgorithm(b[0x1c]), // what hashing algorithm is used? + depth: treeDepth, + childEntries: make([]directoryHashEntry, 0, int(dxEntriesCount)), + dotEntry: &directoryEntry{ + inode: dotInode, + fileType: dotFileType, + filename: ".", + }, + dotDotEntry: &directoryEntry{ + inode: dotdotInode, + fileType: dotdotFileType, + filename: "..", + }, + } + + // remove 1, because the count includes the one in the dx_root itself + node.childEntries = append(node.childEntries, directoryHashEntry{hash: 0, block: binary.LittleEndian.Uint32(b[0x24:0x28])}) + for i := 0; i < int(dxEntriesCount)-1; i++ { + entryOffset := 0x28 + (i * 8) + hash := binary.LittleEndian.Uint32(b[entryOffset : entryOffset+4]) + block := binary.LittleEndian.Uint32(b[entryOffset+4 : entryOffset+8]) + node.childEntries = append(node.childEntries, directoryHashEntry{hash: hash, block: block}) + } + + return node, nil +} + +// parseDirectoryTreeNode parses an internal directory hash tree node from the given byte slice. Reads only the node. 
+func parseDirectoryTreeNode(b []byte) (node *directoryHashNode, err error) { + // min size + if len(b) < directoryHashTreeNodeMinSize { + return nil, fmt.Errorf("directory hash tree root is too small") + } + + dxEntriesCount := binary.LittleEndian.Uint16(b[0xa:0xc]) + + node = &directoryHashNode{ + childEntries: make([]directoryHashEntry, 0, int(dxEntriesCount)), + } + node.childEntries = append(node.childEntries, directoryHashEntry{hash: 0, block: binary.LittleEndian.Uint32(b[0xc:0x10])}) + for i := 0; i < int(dxEntriesCount)-1; i++ { + entryOffset := 0x10 + (i * 8) + hash := binary.LittleEndian.Uint32(b[entryOffset : entryOffset+4]) + block := binary.LittleEndian.Uint32(b[entryOffset+4 : entryOffset+8]) + node.childEntries = append(node.childEntries, directoryHashEntry{hash: hash, block: block}) + } + + return node, nil +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directoryentry.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directoryentry.go new file mode 100644 index 00000000000..295469348e2 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/directoryentry.go @@ -0,0 +1,176 @@ +package ext4 + +import ( + "encoding/binary" + "fmt" +) + +// directoryFileType uses different constants than the file type property in the inode +type directoryFileType uint8 + +const ( + minDirEntryLength int = 12 // actually 9 for 1-byte file length, but must be multiple of 4 bytes + maxDirEntryLength int = 263 + + // directory file types + dirFileTypeUnknown directoryFileType = 0x0 + dirFileTypeRegular directoryFileType = 0x1 + dirFileTypeDirectory directoryFileType = 0x2 + dirFileTypeCharacter directoryFileType = 0x3 + dirFileTypeBlock directoryFileType = 0x4 + dirFileTypeFifo directoryFileType = 0x5 + dirFileTypeSocket directoryFileType = 0x6 + dirFileTypeSymlink directoryFileType = 0x7 +) + +// directoryEntry is a single directory entry +type directoryEntry struct { + inode uint32 + filename string + fileType directoryFileType +} 
+ +func (de *directoryEntry) equal(other *directoryEntry) bool { + return de.inode == other.inode && de.filename == other.filename && de.fileType == other.fileType +} + +func directoryEntryFromBytes(b []byte) (*directoryEntry, error) { + if len(b) < minDirEntryLength { + return nil, fmt.Errorf("directory entry of length %d is less than minimum %d", len(b), minDirEntryLength) + } + if len(b) > maxDirEntryLength { + b = b[:maxDirEntryLength] + } + + //nolint:gocritic // keep this here for future reference + // length := binary.LittleEndian.Uint16(b[0x4:0x6]) + nameLength := b[0x6] + name := b[0x8 : 0x8+nameLength] + de := directoryEntry{ + inode: binary.LittleEndian.Uint32(b[0x0:0x4]), + fileType: directoryFileType(b[0x7]), + filename: string(name), + } + return &de, nil +} + +func directoryEntriesChecksumFromBytes(b []byte) (checksum uint32, err error) { + if len(b) != minDirEntryLength { + return checksum, fmt.Errorf("directory entry checksum of length %d is not required %d", len(b), minDirEntryLength) + } + inode := binary.LittleEndian.Uint32(b[0x0:0x4]) + if inode != 0 { + return checksum, fmt.Errorf("directory entry checksum inode is not 0") + } + length := binary.LittleEndian.Uint16(b[0x4:0x6]) + if int(length) != minDirEntryLength { + return checksum, fmt.Errorf("directory entry checksum length is not %d", minDirEntryLength) + } + nameLength := b[0x6] + if nameLength != 0 { + return checksum, fmt.Errorf("directory entry checksum name length is not 0") + } + fileType := b[0x7] + if fileType != 0xde { + return checksum, fmt.Errorf("directory entry checksum file type is not set to reserved 0xde") + } + return binary.LittleEndian.Uint32(b[0x8:0xc]), nil +} + +// toBytes convert a directoryEntry to bytes. If isLast, then the size recorded is the number of bytes +// from beginning of directory entry to end of block, minus the amount left for the checksum. 
+func (de *directoryEntry) toBytes(withSize uint16) []byte { + // it must be the header length + filename length rounded up to nearest multiple of 4 + nameLength := uint8(len(de.filename)) + entryLength := uint16(nameLength) + 8 + if leftover := entryLength % 4; leftover > 0 { + entryLength += (4 - leftover) + } + + if withSize > 0 { + entryLength = withSize + } + b := make([]byte, entryLength) + binary.LittleEndian.PutUint32(b[0x0:0x4], de.inode) + binary.LittleEndian.PutUint16(b[0x4:0x6], entryLength) + b[0x6] = nameLength + b[0x7] = byte(de.fileType) + copy(b[0x8:], de.filename) + + return b +} + +func parseDirEntriesLinear(b []byte, withChecksums bool, blocksize, inodeNumber, inodeGeneration, checksumSeed uint32) ([]*directoryEntry, error) { + // checksum if needed + if withChecksums { + var ( + newb []byte + checksumEntryOffset = int(blocksize) - minDirEntryLength + checksumOffset = int(blocksize) - 4 + ) + checksummer := directoryChecksummer(checksumSeed, inodeNumber, inodeGeneration) + for i := 0; i < len(b); i += int(blocksize) { + block := b[i : i+int(blocksize)] + inBlockChecksum := block[checksumOffset:] + block = block[:checksumEntryOffset] + // save everything except the checksum + newb = append(newb, block...) 
+ // checksum the entire block + checksumValue := binary.LittleEndian.Uint32(inBlockChecksum) + // checksum the block + actualChecksum := checksummer(block) + if actualChecksum != checksumValue { + return nil, fmt.Errorf("directory block checksum mismatch: expected %x, got %x", checksumValue, actualChecksum) + } + } + b = newb + } + + // convert into directory entries + entries := make([]*directoryEntry, 0, 4) + count := 0 + for i := 0; i < len(b); count++ { + // read the length of the entry + length := binary.LittleEndian.Uint16(b[i+0x4 : i+0x6]) + de, err := directoryEntryFromBytes(b[i : i+int(length)]) + if err != nil { + return nil, fmt.Errorf("failed to parse directory entry %d: %v", count, err) + } + entries = append(entries, de) + i += int(length) + } + return entries, nil +} + +// parseDirEntriesHashed parse hashed data blocks to get directory entries. +// If hashedName is 0, returns all directory entries; otherwise, returns a slice with a single entry with the given name. +func parseDirEntriesHashed(b []byte, depth uint8, node dxNode, blocksize uint32, withChecksums bool, inodeNumber, inodeGeneration, checksumSeed uint32) (dirEntries []*directoryEntry, err error) { + for _, entry := range node.entries() { + var ( + addDirEntries []*directoryEntry + start = entry.block * blocksize + end = start + blocksize + ) + + nextBlock := b[start:end] + if depth == 0 { + addDirEntries, err = parseDirEntriesLinear(nextBlock, withChecksums, blocksize, inodeNumber, inodeGeneration, checksumSeed) + if err != nil { + return nil, fmt.Errorf("error parsing linear directory entries: %w", err) + } + } else { + // recursively parse the next level of the tree + // read the next level down + node, err := parseDirectoryTreeNode(nextBlock) + if err != nil { + return nil, fmt.Errorf("error parsing directory tree node: %w", err) + } + addDirEntries, err = parseDirEntriesHashed(b, depth-1, node, blocksize, withChecksums, inodeNumber, inodeGeneration, checksumSeed) + if err != nil { + 
return nil, fmt.Errorf("error parsing hashed directory entries: %w", err) + } + } + dirEntries = append(dirEntries, addDirEntries...) + } + return dirEntries, nil +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/dirhash.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/dirhash.go new file mode 100644 index 00000000000..8717c34136e --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/dirhash.go @@ -0,0 +1,157 @@ +package ext4 + +import ( + "github.com/diskfs/go-diskfs/filesystem/ext4/md4" +) + +const ( + teaDelta uint32 = 0x9E3779B9 + k1 uint32 = 0 + k2 uint32 = 0o13240474631 + k3 uint32 = 0o15666365641 + ext4HtreeEOF32 uint32 = ((1 << (32 - 1)) - 1) + ext4HtreeEOF64 uint64 = ((1 << (64 - 1)) - 1) +) + +type hashVersion uint8 + +const ( + HashVersionLegacy = 0 + HashVersionHalfMD4 = 1 + HashVersionTEA = 2 + HashVersionLegacyUnsigned = 3 + HashVersionHalfMD4Unsigned = 4 + HashVersionTEAUnsigned = 5 + HashVersionSIP = 6 +) + +func TEATransform(buf [4]uint32, in []uint32) [4]uint32 { + var sum uint32 + var b0, b1 = buf[0], buf[1] + var a, b, c, d = in[0], in[1], in[2], in[3] + var n = 16 + + for ; n > 0; n-- { + sum += teaDelta + b0 += ((b1 << 4) + a) ^ (b1 + sum) ^ ((b1 >> 5) + b) + b1 += ((b0 << 4) + c) ^ (b0 + sum) ^ ((b0 >> 5) + d) + } + + buf[0] += b0 + buf[1] += b1 + return buf +} + +// the old legacy hash +// +//nolint:unparam,revive // we do not used signed, but we probably should, so leaving until we are sure +func dxHackHash(name string, signed bool) uint32 { + var hash uint32 + var hash0, hash1 uint32 = 0x12a3fe2d, 0x37abe8f9 + b := []byte(name) + + for i := len(b); i > 0; i-- { + // get the specific character + c := int(b[i-1]) + // the value of the individual character depends on if it is signed or not + hash = hash1 + (hash0 ^ uint32(c*7152373)) + + if hash&0x80000000 != 0 { + hash -= 0x7fffffff + } + hash1 = hash0 + hash0 = hash + } + return hash0 << 1 +} + +//nolint:unparam,revive // we do not used signed, but 
we probably should, so leaving until we are sure +func str2hashbuf(msg string, num int, signed bool) []uint32 { + var buf [8]uint32 + var pad, val uint32 + b := []byte(msg) + size := len(b) + + pad = uint32(size) | (uint32(size) << 8) + pad |= pad << 16 + + val = pad + if size > num*4 { + size = num * 4 + } + var j int + for i := 0; i < size; i++ { + c := int(b[i]) + val = uint32(c) + (val << 8) + if (i % 4) == 3 { + buf[j] = val + val = pad + num-- + j++ + } + } + num-- + if num >= 0 { + buf[j] = val + j++ + } + for num--; num >= 0; num-- { + buf[j] = pad + j++ + } + return buf[:] +} + +func ext4fsDirhash(name string, version hashVersion, seed []uint32) (hash, minorHash uint32) { + /* Initialize the default seed for the hash checksum functions */ + var buf = [4]uint32{0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476} + + // Check to see if the seed is all zero, and if so, use the default + for i, val := range seed { + if val != 0 { + buf[i] = val + } + } + + switch version { + case HashVersionLegacyUnsigned: + hash = dxHackHash(name, false) + case HashVersionLegacy: + hash = dxHackHash(name, true) + case HashVersionHalfMD4Unsigned: + for i := 0; i < len(name); i += 32 { + in := str2hashbuf(name[i:], 8, false) + buf[1] = md4.HalfMD4Transform(buf, in) + } + minorHash = buf[2] + hash = buf[1] + case HashVersionHalfMD4: + for i := 0; i < len(name); i += 32 { + in := str2hashbuf(name[i:], 8, true) + buf[1] = md4.HalfMD4Transform(buf, in) + } + minorHash = buf[2] + hash = buf[1] + case HashVersionTEAUnsigned: + for i := 0; i < len(name); i += 16 { + in := str2hashbuf(name[i:], 4, false) + buf = TEATransform(buf, in) + } + hash = buf[0] + minorHash = buf[1] + case HashVersionTEA: + for i := 0; i < len(name); i += 16 { + in := str2hashbuf(name[i:], 4, true) + buf = TEATransform(buf, in) + } + hash = buf[0] + minorHash = buf[1] + default: + return 0, 0 + } + hash &= ^uint32(1) + if hash == (ext4HtreeEOF32 << 1) { + hash = (ext4HtreeEOF32 - 1) << 1 + } + return hash, 
minorHash +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.go new file mode 100644 index 00000000000..b322dd7356d --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.go @@ -0,0 +1,1748 @@ +package ext4 + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + iofs "io/fs" + "math" + "os" + "path" + "sort" + "strings" + "time" + + "github.com/diskfs/go-diskfs/filesystem" + "github.com/diskfs/go-diskfs/filesystem/ext4/crc" + "github.com/diskfs/go-diskfs/util" + "github.com/google/uuid" +) + +// SectorSize indicates what the sector size in bytes is +type SectorSize uint16 + +// BlockSize indicates how many sectors are in a block +type BlockSize uint8 + +// BlockGroupSize indicates how many blocks are in a group, standardly 8*block_size_in_bytes + +const ( + // SectorSize512 is a sector size of 512 bytes, used as the logical size for all ext4 filesystems + SectorSize512 SectorSize = 512 + minBlocksPerGroup uint32 = 256 + BootSectorSize SectorSize = 2 * SectorSize512 + SuperblockSize SectorSize = 2 * SectorSize512 + BlockGroupFactor int = 8 + DefaultInodeRatio int64 = 8192 + DefaultInodeSize int64 = 256 + DefaultReservedBlocksPercent uint8 = 5 + DefaultVolumeName = "diskfs_ext4" + minClusterSize int = 128 + maxClusterSize int = 65529 + bytesPerSlot int = 32 + maxCharsLongFilename int = 13 + maxBlocksPerExtent uint16 = 32768 + million int = 1000000 + billion int = 1000 * million + firstNonReservedInode uint32 = 11 // traditional + + minBlockLogSize int = 10 /* 1024 */ + maxBlockLogSize int = 16 /* 65536 */ + minBlockSize int = (1 << minBlockLogSize) + maxBlockSize int = (1 << maxBlockLogSize) + + max32Num uint64 = math.MaxUint32 + max64Num uint64 = math.MaxUint64 + + maxFilesystemSize32Bit uint64 = 16*2 ^ 40 + maxFilesystemSize64Bit uint64 = 1*2 ^ 60 + + checksumType uint8 = 1 + + // default for log groups per flex group + defaultLogGroupsPerFlex int = 3 + + // 
fixed inodes + rootInode uint32 = 2 + userQuotaInode uint32 = 3 + groupQuotaInode uint32 = 4 + journalInode uint32 = 8 + lostFoundInode = 11 // traditional +) + +type Params struct { + UUID *uuid.UUID + SectorsPerBlock uint8 + BlocksPerGroup uint32 + InodeRatio int64 + InodeCount uint32 + SparseSuperVersion uint8 + Checksum bool + ClusterSize int64 + ReservedBlocksPercent uint8 + VolumeName string + // JournalDevice external journal device, only checked if WithFeatureSeparateJournalDevice(true) is set + JournalDevice string + LogFlexBlockGroups int + Features []FeatureOpt + DefaultMountOpts []MountOpt +} + +// FileSystem implememnts the FileSystem interface +type FileSystem struct { + bootSector []byte + superblock *superblock + groupDescriptors *groupDescriptors + blockGroups int64 + size int64 + start int64 + file util.File +} + +// Equal compare if two filesystems are equal +func (fs *FileSystem) Equal(a *FileSystem) bool { + localMatch := fs.file == a.file + sbMatch := fs.superblock.equal(a.superblock) + gdMatch := fs.groupDescriptors.equal(a.groupDescriptors) + return localMatch && sbMatch && gdMatch +} + +// Create creates an ext4 filesystem in a given file or device +// +// requires the util.File where to create the filesystem, size is the size of the filesystem in bytes, +// start is how far in bytes from the beginning of the util.File to create the filesystem, +// and blocksize is is the logical blocksize to use for creating the filesystem +// +// note that you are *not* required to create the filesystem on the entire disk. You could have a disk of size +// 20GB, and create a small filesystem of size 50MB that begins 2GB into the disk. +// This is extremely useful for creating filesystems on disk partitions. 
+// +// Note, however, that it is much easier to do this using the higher-level APIs at github.com/diskfs/go-diskfs +// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors) +// where a partition starts and ends. +// +// If the provided blocksize is 0, it will use the default of 512 bytes. If it is any number other than 0 +// or 512, it will return an error. +// +//nolint:gocyclo // yes, this has high cyclomatic complexity, but we can accept it +func Create(f util.File, size, start, sectorsize int64, p *Params) (*FileSystem, error) { + // be safe about the params pointer + if p == nil { + p = &Params{} + } + + // sectorsize must be <=0 or exactly SectorSize512 or error + // because of this, we know we can scale it down to a uint32, since it only can be 512 bytes + if sectorsize != int64(SectorSize512) && sectorsize > 0 { + return nil, fmt.Errorf("sectorsize for ext4 must be either 512 bytes or 0, not %d", sectorsize) + } + var sectorsize32 = uint32(sectorsize) + // there almost are no limits on an ext4 fs - theoretically up to 1 YB + // but we do have to check the max and min size per the requested parameters + // if size < minSizeGivenParameters { + // return nil, fmt.Errorf("requested size is smaller than minimum allowed ext4 size %d for given parameters", minSizeGivenParameters*4) + // } + // if size > maxSizeGivenParameters { + // return nil, fmt.Errorf("requested size is bigger than maximum ext4 size %d for given parameters", maxSizeGivenParameters*4) + // } + + // uuid + fsuuid := p.UUID + if fsuuid == nil { + fsuuid2, _ := uuid.NewRandom() + fsuuid = &fsuuid2 + } + + // blocksize + sectorsPerBlock := p.SectorsPerBlock + userProvidedBlocksize := false + switch { + case sectorsPerBlock > 128 || sectorsPerBlock < 2: + return nil, fmt.Errorf("invalid sectors per block %d, must be between %d and %d sectors", sectorsPerBlock, 2, 128) + case sectorsPerBlock < 1: + sectorsPerBlock = 2 + default: + 
userProvidedBlocksize = true + } + blocksize := uint32(sectorsPerBlock) * sectorsize32 + + // how many whole blocks is that? + numblocks := size / int64(blocksize) + + // recalculate if it was not user provided + if !userProvidedBlocksize { + sectorsPerBlockR, blocksizeR, numblocksR := recalculateBlocksize(numblocks, size) + _, blocksize, numblocks = uint8(sectorsPerBlockR), blocksizeR, numblocksR + } + + // how many blocks in each block group (and therefore how many block groups) + // if not provided, by default it is 8*blocksize (in bytes) + blocksPerGroup := p.BlocksPerGroup + switch { + case blocksPerGroup <= 0: + blocksPerGroup = blocksize * 8 + case blocksPerGroup < minBlocksPerGroup: + return nil, fmt.Errorf("invalid number of blocks per group %d, must be at least %d", blocksPerGroup, minBlocksPerGroup) + case blocksPerGroup > 8*blocksize: + return nil, fmt.Errorf("invalid number of blocks per group %d, must be no larger than 8*blocksize of %d", blocksPerGroup, blocksize) + case blocksPerGroup%8 != 0: + return nil, fmt.Errorf("invalid number of blocks per group %d, must be divisible by 8", blocksPerGroup) + } + + // how many block groups do we have? 
+ blockGroups := numblocks / int64(blocksPerGroup) + + // track how many free blocks we have + freeBlocks := numblocks + + clusterSize := p.ClusterSize + + // use our inode ratio to determine how many inodes we should have + inodeRatio := p.InodeRatio + if inodeRatio <= 0 { + inodeRatio = DefaultInodeRatio + } + if inodeRatio < int64(blocksize) { + inodeRatio = int64(blocksize) + } + if inodeRatio < clusterSize { + inodeRatio = clusterSize + } + + inodeCount := p.InodeCount + switch { + case inodeCount <= 0: + // calculate how many inodes are needed + inodeCount64 := (numblocks * int64(blocksize)) / inodeRatio + if uint64(inodeCount64) > max32Num { + return nil, fmt.Errorf("requested %d inodes, greater than max %d", inodeCount64, max32Num) + } + inodeCount = uint32(inodeCount64) + case uint64(inodeCount) > max32Num: + return nil, fmt.Errorf("requested %d inodes, greater than max %d", inodeCount, max32Num) + } + + inodesPerGroup := int64(inodeCount) / blockGroups + + // track how many free inodes we have + freeInodes := inodeCount + + // which blocks have superblock and GDT? + var ( + backupSuperblocks []int64 + backupSuperblockGroupsSparse [2]uint32 + ) + // 0 - primary + // ?? - backups + switch p.SparseSuperVersion { + case 2: + // backups in first and last block group + backupSuperblockGroupsSparse = [2]uint32{0, uint32(blockGroups) - 1} + backupSuperblocks = []int64{0, 1, blockGroups - 1} + default: + backupSuperblockGroups := calculateBackupSuperblockGroups(blockGroups) + backupSuperblocks = []int64{0} + for _, bg := range backupSuperblockGroups { + backupSuperblocks = append(backupSuperblocks, bg*int64(blocksPerGroup)) + } + } + + freeBlocks -= int64(len(backupSuperblocks)) + + var firstDataBlock uint32 + if blocksize == 1024 { + firstDataBlock = 1 + } + + /* + size calculations + we have the total size of the disk from `size uint64` + we have the sectorsize fixed at SectorSize512 + + what do we need to determine or calculate? 
+ - block size + - number of blocks + - number of block groups + - block groups for superblock and gdt backups + - in each block group: + - number of blocks in gdt + - number of reserved blocks in gdt + - number of blocks in inode table + - number of data blocks + + config info: + + [defaults] + base_features = sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr + default_mntopts = acl,user_xattr + enable_periodic_fsck = 0 + blocksize = 4096 + inode_size = 256 + inode_ratio = 16384 + + [fs_types] + ext3 = { + features = has_journal + } + ext4 = { + features = has_journal,extent,huge_file,flex_bg,uninit_bg,64bit,dir_nlink,extra_isize + inode_size = 256 + } + ext4dev = { + features = has_journal,extent,huge_file,flex_bg,uninit_bg,inline_data,64bit,dir_nlink,extra_isize + inode_size = 256 + options = test_fs=1 + } + small = { + blocksize = 1024 + inode_size = 128 + inode_ratio = 4096 + } + floppy = { + blocksize = 1024 + inode_size = 128 + inode_ratio = 8192 + } + big = { + inode_ratio = 32768 + } + huge = { + inode_ratio = 65536 + } + news = { + inode_ratio = 4096 + } + largefile = { + inode_ratio = 1048576 + blocksize = -1 + } + largefile4 = { + inode_ratio = 4194304 + blocksize = -1 + } + hurd = { + blocksize = 4096 + inode_size = 128 + } + */ + + // allocate root directory, single inode + freeInodes-- + + // how many reserved blocks? + reservedBlocksPercent := p.ReservedBlocksPercent + if reservedBlocksPercent <= 0 { + reservedBlocksPercent = DefaultReservedBlocksPercent + } + + // are checksums enabled? 
+ gdtChecksumType := gdtChecksumNone + if p.Checksum { + gdtChecksumType = gdtChecksumMetadata + } + + // we do not yet support bigalloc + var clustersPerGroup = blocksPerGroup + + // inodesPerGroup: once we know how many inodes per group, and how many groups + // we will have the total inode count + + volumeName := p.VolumeName + if volumeName == "" { + volumeName = DefaultVolumeName + } + + fflags := defaultFeatureFlags + for _, flagopt := range p.Features { + flagopt(&fflags) + } + + mflags := defaultMiscFlags + + // generate hash seed + hashSeed, _ := uuid.NewRandom() + hashSeedBytes := hashSeed[:] + htreeSeed := make([]uint32, 0, 4) + htreeSeed = append(htreeSeed, + binary.LittleEndian.Uint32(hashSeedBytes[:4]), + binary.LittleEndian.Uint32(hashSeedBytes[4:8]), + binary.LittleEndian.Uint32(hashSeedBytes[8:12]), + binary.LittleEndian.Uint32(hashSeedBytes[12:16]), + ) + + // create a UUID for the journal + journalSuperblockUUID, _ := uuid.NewRandom() + + // group descriptor size could be 32 or 64, depending on option + var gdSize uint16 + if fflags.fs64Bit { + gdSize = groupDescriptorSize64Bit + } + + var firstMetaBG uint32 + if fflags.metaBlockGroups { + return nil, fmt.Errorf("meta block groups not yet supported") + } + + // calculate the maximum number of block groups + // maxBlockGroups = (maxFSSize) / (blocksPerGroup * blocksize) + var ( + maxBlockGroups uint64 + ) + if fflags.fs64Bit { + maxBlockGroups = maxFilesystemSize64Bit / (uint64(blocksPerGroup) * uint64(blocksize)) + } else { + maxBlockGroups = maxFilesystemSize32Bit / (uint64(blocksPerGroup) * uint64(blocksize)) + } + reservedGDTBlocks := maxBlockGroups * 32 / maxBlockGroups + if reservedGDTBlocks > math.MaxUint16 { + return nil, fmt.Errorf("too many reserved blocks calculated for group descriptor table") + } + + var ( + journalDeviceNumber uint32 + err error + ) + if fflags.separateJournalDevice && p.JournalDevice != "" { + journalDeviceNumber, err = journalDevice(p.JournalDevice) + if err != nil 
{ + return nil, fmt.Errorf("unable to get journal device: %w", err) + } + } + + // get default mount options + mountOptions := defaultMountOptionsFromOpts(p.DefaultMountOpts) + + // initial KB written. This must be adjusted over time to include: + // - superblock itself (1KB bytes) + // - GDT + // - block bitmap (1KB per block group) + // - inode bitmap (1KB per block group) + // - inode tables (inodes per block group * bytes per inode) + // - root directory + + // for now, we just make it 1024 = 1 KB + initialKB := 1024 + + // only set a project quota inode if the feature was enabled + var projectQuotaInode uint32 + if fflags.projectQuotas { + projectQuotaInode = lostFoundInode + 1 + freeInodes-- + } + + // how many log groups per flex group? Depends on if we have flex groups + logGroupsPerFlex := 0 + if fflags.flexBlockGroups { + logGroupsPerFlex = defaultLogGroupsPerFlex + if p.LogFlexBlockGroups > 0 { + logGroupsPerFlex = p.LogFlexBlockGroups + } + } + + // create the superblock - MUST ADD IN OPTIONS + now, epoch := time.Now(), time.Unix(0, 0) + sb := superblock{ + inodeCount: inodeCount, + blockCount: uint64(numblocks), + reservedBlocks: uint64(reservedBlocksPercent) / 100 * uint64(numblocks), + freeBlocks: uint64(freeBlocks), + freeInodes: freeInodes, + firstDataBlock: firstDataBlock, + blockSize: blocksize, + clusterSize: uint64(clusterSize), + blocksPerGroup: blocksPerGroup, + clustersPerGroup: clustersPerGroup, + inodesPerGroup: uint32(inodesPerGroup), + mountTime: now, + writeTime: now, + mountCount: 0, + mountsToFsck: 0, + filesystemState: fsStateCleanlyUnmounted, + errorBehaviour: errorsContinue, + minorRevision: 0, + lastCheck: now, + checkInterval: 0, + creatorOS: osLinux, + revisionLevel: 1, + reservedBlocksDefaultUID: 0, + reservedBlocksDefaultGID: 0, + firstNonReservedInode: firstNonReservedInode, + inodeSize: uint16(DefaultInodeSize), + blockGroup: 0, + features: fflags, + uuid: fsuuid, + volumeLabel: volumeName, + lastMountedDirectory: "/", + 
algorithmUsageBitmap: 0, // not used in Linux e2fsprogs + preallocationBlocks: 0, // not used in Linux e2fsprogs + preallocationDirectoryBlocks: 0, // not used in Linux e2fsprogs + reservedGDTBlocks: uint16(reservedGDTBlocks), + journalSuperblockUUID: &journalSuperblockUUID, + journalInode: journalInode, + journalDeviceNumber: journalDeviceNumber, + orphanedInodesStart: 0, + hashTreeSeed: htreeSeed, + hashVersion: hashHalfMD4, + groupDescriptorSize: gdSize, + defaultMountOptions: *mountOptions, + firstMetablockGroup: firstMetaBG, + mkfsTime: now, + journalBackup: nil, + // 64-bit mode features + inodeMinBytes: minInodeExtraSize, + inodeReserveBytes: wantInodeExtraSize, + miscFlags: mflags, + raidStride: 0, + multiMountPreventionInterval: 0, + multiMountProtectionBlock: 0, + raidStripeWidth: 0, + checksumType: checksumType, + totalKBWritten: uint64(initialKB), + errorCount: 0, + errorFirstTime: epoch, + errorFirstInode: 0, + errorFirstBlock: 0, + errorFirstFunction: "", + errorFirstLine: 0, + errorLastTime: epoch, + errorLastInode: 0, + errorLastLine: 0, + errorLastBlock: 0, + errorLastFunction: "", + mountOptions: "", // no mount options until it is mounted + backupSuperblockBlockGroups: backupSuperblockGroupsSparse, + lostFoundInode: lostFoundInode, + overheadBlocks: 0, + checksumSeed: crc.CRC32c(0, fsuuid[:]), // according to docs, this should be crc32c(~0, $orig_fs_uuid) + snapshotInodeNumber: 0, + snapshotID: 0, + snapshotReservedBlocks: 0, + snapshotStartInode: 0, + userQuotaInode: userQuotaInode, + groupQuotaInode: groupQuotaInode, + projectQuotaInode: projectQuotaInode, + logGroupsPerFlex: uint64(logGroupsPerFlex), + } + gdt := groupDescriptors{} + + b, err := sb.toBytes() + if err != nil { + return nil, fmt.Errorf("error converting Superblock to bytes: %v", err) + } + + g := gdt.toBytes(gdtChecksumType, sb.checksumSeed) + // how big should the GDT be? 
+ gdSize = groupDescriptorSize + if sb.features.fs64Bit { + gdSize = groupDescriptorSize64Bit + } + gdtSize := int64(gdSize) * numblocks + // write the superblock and GDT to the various locations on disk + for _, bg := range backupSuperblocks { + block := bg * int64(blocksPerGroup) + blockStart := block * int64(blocksize) + // allow that the first one requires an offset + incr := int64(0) + if block == 0 { + incr = int64(SectorSize512) * 2 + } + + // write the superblock + count, err := f.WriteAt(b, incr+blockStart+start) + if err != nil { + return nil, fmt.Errorf("error writing Superblock for block %d to disk: %v", block, err) + } + if count != int(SuperblockSize) { + return nil, fmt.Errorf("wrote %d bytes of Superblock for block %d to disk instead of expected %d", count, block, SuperblockSize) + } + + // write the GDT + count, err = f.WriteAt(g, incr+blockStart+int64(SuperblockSize)+start) + if err != nil { + return nil, fmt.Errorf("error writing GDT for block %d to disk: %v", block, err) + } + if count != int(gdtSize) { + return nil, fmt.Errorf("wrote %d bytes of GDT for block %d to disk instead of expected %d", count, block, gdtSize) + } + } + + // create root directory + // there is nothing in there + return &FileSystem{ + bootSector: []byte{}, + superblock: &sb, + groupDescriptors: &gdt, + blockGroups: blockGroups, + size: size, + start: start, + file: f, + }, nil +} + +// Read reads a filesystem from a given disk. +// +// requires the util.File where to read the filesystem, size is the size of the filesystem in bytes, +// start is how far in bytes from the beginning of the util.File the filesystem is expected to begin, +// and blocksize is is the logical blocksize to use for creating the filesystem +// +// note that you are *not* required to read a filesystem on the entire disk. You could have a disk of size +// 20GB, and a small filesystem of size 50MB that begins 2GB into the disk. 
+// This is extremely useful for working with filesystems on disk partitions. +// +// Note, however, that it is much easier to do this using the higher-level APIs at github.com/diskfs/go-diskfs +// which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors) +// where a partition starts and ends. +// +// If the provided blocksize is 0, it will use the default of 512 bytes. If it is any number other than 0 +// or 512, it will return an error. +func Read(file util.File, size, start, sectorsize int64) (*FileSystem, error) { + // blocksize must be <=0 or exactly SectorSize512 or error + if sectorsize != int64(SectorSize512) && sectorsize > 0 { + return nil, fmt.Errorf("sectorsize for ext4 must be either 512 bytes or 0, not %d", sectorsize) + } + // we do not check for ext4 max size because it is theoreticallt 1YB, which is bigger than an int64! Even 1ZB is! + if size < Ext4MinSize { + return nil, fmt.Errorf("requested size is smaller than minimum allowed ext4 size %d", Ext4MinSize) + } + + // load the information from the disk + // read boot sector code + bs := make([]byte, BootSectorSize) + n, err := file.ReadAt(bs, start) + if err != nil { + return nil, fmt.Errorf("could not read boot sector bytes from file: %v", err) + } + if uint16(n) < uint16(BootSectorSize) { + return nil, fmt.Errorf("only could read %d boot sector bytes from file", n) + } + + // read the superblock + // the superblock is one minimal block, i.e. 
2 sectors + superblockBytes := make([]byte, SuperblockSize) + n, err = file.ReadAt(superblockBytes, start+int64(BootSectorSize)) + if err != nil { + return nil, fmt.Errorf("could not read superblock bytes from file: %v", err) + } + if uint16(n) < uint16(SuperblockSize) { + return nil, fmt.Errorf("only could read %d superblock bytes from file", n) + } + + // convert the bytes into a superblock structure + sb, err := superblockFromBytes(superblockBytes) + if err != nil { + return nil, fmt.Errorf("could not interpret superblock data: %v", err) + } + + // now read the GDT + // how big should the GDT be? + gdtSize := uint64(sb.groupDescriptorSize) * sb.blockGroupCount() + + gdtBytes := make([]byte, gdtSize) + // where do we find the GDT? + // - if blocksize is 1024, then 1024 padding for BootSector is block 0, 1024 for superblock is block 1 + // and then the GDT starts at block 2 + // - if blocksize is larger than 1024, then 1024 padding for BootSector followed by 1024 for superblock + // is block 0, and then the GDT starts at block 1 + gdtBlock := 1 + if sb.blockSize == 1024 { + gdtBlock = 2 + } + n, err = file.ReadAt(gdtBytes, start+int64(gdtBlock)*int64(sb.blockSize)) + if err != nil { + return nil, fmt.Errorf("could not read Group Descriptor Table bytes from file: %v", err) + } + if uint64(n) < gdtSize { + return nil, fmt.Errorf("only could read %d Group Descriptor Table bytes from file instead of %d", n, gdtSize) + } + gdt, err := groupDescriptorsFromBytes(gdtBytes, sb.groupDescriptorSize, sb.checksumSeed, sb.gdtChecksumType()) + if err != nil { + return nil, fmt.Errorf("could not interpret Group Descriptor Table data: %v", err) + } + + return &FileSystem{ + bootSector: bs, + superblock: sb, + groupDescriptors: gdt, + blockGroups: int64(sb.blockGroupCount()), + size: size, + start: start, + file: file, + }, nil +} + +// Type returns the type code for the filesystem. 
Always returns filesystem.TypeExt4 +func (fs *FileSystem) Type() filesystem.Type { + return filesystem.TypeExt4 +} + +// Mkdir make a directory at the given path. It is equivalent to `mkdir -p`, i.e. idempotent, in that: +// +// * It will make the entire tree path if it does not exist +// * It will not return an error if the path already exists +func (fs *FileSystem) Mkdir(p string) error { + _, err := fs.readDirWithMkdir(p, true) + // we are not interesting in returning the entries + return err +} + +// ReadDir return the contents of a given directory in a given filesystem. +// +// Returns a slice of os.FileInfo with all of the entries in the directory. +// +// Will return an error if the directory does not exist or is a regular file and not a directory +func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) { + dir, err := fs.readDirWithMkdir(p, false) + if err != nil { + return nil, fmt.Errorf("error reading directory %s: %v", p, err) + } + // once we have made it here, looping is done. 
We have found the final entry + // we need to return all of the file info + count := len(dir.entries) + ret := make([]os.FileInfo, count) + for i, e := range dir.entries { + in, err := fs.readInode(e.inode) + if err != nil { + return nil, fmt.Errorf("could not read inode %d at position %d in directory: %v", e.inode, i, err) + } + ret[i] = &FileInfo{ + modTime: in.modifyTime, + name: e.filename, + size: int64(in.size), + isDir: e.fileType == dirFileTypeDirectory, + } + } + + return ret, nil +} + +// OpenFile returns an io.ReadWriter from which you can read the contents of a file +// or write contents to the file +// +// accepts normal os.OpenFile flags +// +// returns an error if the file does not exist +func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { + filename := path.Base(p) + dir := path.Dir(p) + parentDir, entry, err := fs.getEntryAndParent(p) + if err != nil { + return nil, err + } + if entry != nil && entry.fileType == dirFileTypeDirectory { + return nil, fmt.Errorf("cannot open directory %s as file", p) + } + + // see if the file exists + // if the file does not exist, and is not opened for os.O_CREATE, return an error + if entry == nil { + if flag&os.O_CREATE == 0 { + return nil, fmt.Errorf("target file %s does not exist and was not asked to create", p) + } + // else create it + entry, err = fs.mkFile(parentDir, filename) + if err != nil { + return nil, fmt.Errorf("failed to create file %s: %v", p, err) + } + } + // get the inode + inodeNumber := entry.inode + inode, err := fs.readInode(inodeNumber) + if err != nil { + return nil, fmt.Errorf("could not read inode number %d: %v", inodeNumber, err) + } + + // if a symlink, read the target, rather than the inode itself, which does not point to anything + if inode.fileType == fileTypeSymbolicLink { + // is the symlink relative or absolute? 
+ linkTarget := inode.linkTarget + if !path.IsAbs(linkTarget) { + // convert it into an absolute path + // and start the process again + linkTarget = path.Join(dir, linkTarget) + // we probably could make this more efficient by checking if the final linkTarget + // is in the same directory as we already are parsing, rather than walking the whole thing again + // leave that for the future. + linkTarget = path.Clean(linkTarget) + } + return fs.OpenFile(linkTarget, flag) + } + offset := int64(0) + if flag&os.O_APPEND == os.O_APPEND { + offset = int64(inode.size) + } + // when we open a file, we load the inode but also all of the extents + extents, err := inode.extents.blocks(fs) + if err != nil { + return nil, fmt.Errorf("could not read extent tree for inode %d: %v", inodeNumber, err) + } + return &File{ + directoryEntry: entry, + inode: inode, + isReadWrite: flag&os.O_RDWR != 0, + isAppend: flag&os.O_APPEND != 0, + offset: offset, + filesystem: fs, + extents: extents, + }, nil +} + +// Label read the volume label +func (fs *FileSystem) Label() string { + if fs.superblock == nil { + return "" + } + return fs.superblock.volumeLabel +} + +// Rm remove file or directory at path. +// If path is directory, it only will remove if it is empty. +// If path is a file, it will remove the file. +// Will not remove any parents. 
// Error if the file does not exist or is not an empty directory
func (fs *FileSystem) Rm(p string) error {
	parentDir, entry, err := fs.getEntryAndParent(p)
	if err != nil {
		return err
	}
	// refuse to remove the root directory itself
	if parentDir.root && entry == &parentDir.directoryEntry {
		return fmt.Errorf("cannot remove root directory")
	}
	if entry == nil {
		return fmt.Errorf("file does not exist: %s", p)
	}
	// if it is a directory, it must be empty
	if entry.fileType == dirFileTypeDirectory {
		// read the directory
		entries, err := fs.readDirectory(entry.inode)
		if err != nil {
			return fmt.Errorf("could not read directory %s: %v", p, err)
		}
		// more than the "." and ".." entries means non-empty
		if len(entries) > 2 {
			return fmt.Errorf("directory not empty: %s", p)
		}
	}
	// at this point, it is either a file or an empty directory, so remove it

	// free up the blocks
	// read the inode to find the blocks
	removedInode, err := fs.readInode(entry.inode)
	if err != nil {
		return fmt.Errorf("could not read inode %d for %s: %v", entry.inode, p, err)
	}
	extents, err := removedInode.extents.blocks(fs)
	if err != nil {
		return fmt.Errorf("could not read extents for inode %d for %s: %v", entry.inode, p, err)
	}
	// clear the inode from the inode bitmap
	inodeBG := blockGroupForInode(int(entry.inode), fs.superblock.inodesPerGroup)
	inodeBitmap, err := fs.readInodeBitmap(inodeBG)
	if err != nil {
		return fmt.Errorf("could not read inode bitmap: %v", err)
	}
	// clear up the blocks from the block bitmap. We are not clearing the block content, just the bitmap.
	// keep a cache of bitmaps, so we do not have to read them again and again
	blockBitmaps := make(map[int]*util.Bitmap)
	for _, e := range extents {
		for i := e.startingBlock; i < e.startingBlock+uint64(e.count); i++ {
			// determine what block group this block is in, and read the bitmap for that blockgroup
			bg := blockGroupForBlock(int(i), fs.superblock.blocksPerGroup)
			dataBlockBitmap, ok := blockBitmaps[bg]
			if !ok {
				dataBlockBitmap, err = fs.readBlockBitmap(bg)
				if err != nil {
					return fmt.Errorf("could not read block bitmap: %v", err)
				}
				blockBitmaps[bg] = dataBlockBitmap
			}
			// the extent lists the absolute block number, but the bitmap is relative to the block group
			blockInBG := int(i) - int(fs.superblock.blocksPerGroup)*bg
			if err := dataBlockBitmap.Clear(blockInBG); err != nil {
				return fmt.Errorf("could not clear block bitmap for block %d: %v", i, err)
			}
		}
	}
	// flush every bitmap we modified back to disk
	for bg, dataBlockBitmap := range blockBitmaps {
		if err := fs.writeBlockBitmap(dataBlockBitmap, bg); err != nil {
			return fmt.Errorf("could not write block bitmap back to disk: %v", err)
		}
	}

	// remove the directory entry from the parent
	newEntries := make([]*directoryEntry, 0, len(parentDir.entries)-1)
	for _, e := range parentDir.entries {
		if e.inode == entry.inode {
			continue
		}
		newEntries = append(newEntries, e)
	}
	parentDir.entries = newEntries
	// write the parent directory back
	dirBytes := parentDir.toBytes(fs.superblock.blockSize, directoryChecksumAppender(fs.superblock.checksumSeed, parentDir.inode, 0))
	// NOTE(review): the two error messages below report entry.inode, but the reads are
	// of parentDir.inode — the messages would mislead on failure; confirm and fix upstream.
	parentInode, err := fs.readInode(parentDir.inode)
	if err != nil {
		return fmt.Errorf("could not read inode %d for %s: %v", entry.inode, path.Base(p), err)
	}
	extents, err = parentInode.extents.blocks(fs)
	if err != nil {
		return fmt.Errorf("could not read extents for inode %d for %s: %v", entry.inode, path.Base(p), err)
	}
	for _, e := range extents {
		for i := 0; i < int(e.count); i++ {
			// NOTE(review): this slice looks wrong for multi-block directories — it always
			// starts near the beginning of dirBytes (i, not i*blockSize) and always ends at
			// blockSize, so only the first block's worth of data is ever written. Presumably
			// it should be dirBytes[i*blockSize:(i+1)*blockSize]; confirm against callers.
			b := dirBytes[i:fs.superblock.blockSize]
			// NOTE(review): unlike the group-descriptor write below, this WriteAt does not add
			// fs.start — looks like it assumes the filesystem starts at offset 0; confirm.
			// Also, the error message says "inode bitmap" but this writes directory data.
			if _, err := fs.file.WriteAt(b, (int64(i)+int64(e.startingBlock))*int64(fs.superblock.blockSize)); err != nil {
				return fmt.Errorf("could not write inode bitmap back to disk: %v", err)
			}
		}
	}

	// remove the inode from the bitmap and write the inode bitmap back
	// inode is absolute, but bitmap is relative to block group
	inodeInBG := int(entry.inode) - int(fs.superblock.inodesPerGroup)*inodeBG
	if err := inodeBitmap.Clear(inodeInBG); err != nil {
		return fmt.Errorf("could not clear inode bitmap for inode %d: %v", entry.inode, err)
	}

	// write the inode bitmap back
	if err := fs.writeInodeBitmap(inodeBitmap, inodeBG); err != nil {
		return fmt.Errorf("could not write inode bitmap back to disk: %v", err)
	}
	// update the group descriptor
	gd := fs.groupDescriptors.descriptors[inodeBG]

	// update the group descriptor inodes and blocks
	gd.freeInodes++
	gd.freeBlocks += uint32(removedInode.blocks)
	// write the group descriptor back
	gdBytes := gd.toBytes(fs.superblock.gdtChecksumType(), fs.superblock.uuid.ID())
	// the GDT starts at block 2 when blocksize is 1024 (superblock occupies block 1),
	// otherwise at block 1 (padding and superblock share block 0)
	gdtBlock := 1
	if fs.superblock.blockSize == 1024 {
		gdtBlock = 2
	}
	if _, err := fs.file.WriteAt(gdBytes, fs.start+int64(gdtBlock)*int64(fs.superblock.blockSize)+int64(gd.number)*int64(fs.superblock.groupDescriptorSize)); err != nil {
		return fmt.Errorf("could not write Group Descriptor bytes to file: %v", err)
	}

	// we could remove the inode from the inode table in the group descriptor,
	// but we do not need to do so. Since we are not reusing the inode, we can just leave it there,
	// the bitmap always is checked before reusing an inode location.
	// finally, account for the freed inode and blocks in the superblock and persist it
	fs.superblock.freeInodes++
	fs.superblock.freeBlocks += removedInode.blocks
	return fs.writeSuperblock()
}

// Truncate set the size of the named file to the given size. Only the recorded inode size is
// updated; blocks are neither freed on shrink nor reserved on grow yet (see comment in body).
func (fs *FileSystem) Truncate(p string, size int64) error {
	_, entry, err := fs.getEntryAndParent(p)
	if err != nil {
		return err
	}
	if entry == nil {
		return fmt.Errorf("file does not exist: %s", p)
	}
	if entry.fileType == dirFileTypeDirectory {
		return fmt.Errorf("cannot truncate directory %s", p)
	}
	// it is not a directory, and it exists, so truncate it
	inode, err := fs.readInode(entry.inode)
	if err != nil {
		return fmt.Errorf("could not read inode %d in directory: %v", entry.inode, err)
	}
	// change the file size
	inode.size = uint64(size)

	// free used blocks if shrank, or reserve new blocks if grew
	// both of which mean updating the superblock, and the extents tree in the inode

	// write the inode back
	return fs.writeInode(inode)
}

// getEntryAndParent given a path, get the Directory for the parent and the directory entry for the file.
// If the directory does not exist, returns an error.
// If the file does not exist, does not return an error, but rather returns a nil entry.
func (fs *FileSystem) getEntryAndParent(p string) (parent *Directory, entry *directoryEntry, err error) {
	dir := path.Dir(p)
	filename := path.Base(p)
	// get the directory entries
	parentDir, err := fs.readDirWithMkdir(dir, false)
	if err != nil {
		return nil, nil, fmt.Errorf("could not read directory entries for %s", dir)
	}
	// we now know that the directory exists, see if the file exists
	var targetEntry *directoryEntry
	if parentDir.root && filename == "/" {
		// root directory
		return parentDir, &parentDir.directoryEntry, nil
	}

	for _, e := range parentDir.entries {
		if e.filename != filename {
			continue
		}
		// if we got this far, we have found the file
		targetEntry = e
		break
	}
	return parentDir, targetEntry, nil
}

// Stat return fs.FileInfo about a specific file path.
func (fs *FileSystem) Stat(p string) (iofs.FileInfo, error) {
	_, entry, err := fs.getEntryAndParent(p)
	if err != nil {
		return nil, err
	}
	if entry == nil {
		return nil, fmt.Errorf("file does not exist: %s", p)
	}
	// the directory entry only holds the name and type; size and times come from the inode
	in, err := fs.readInode(entry.inode)
	if err != nil {
		return nil, fmt.Errorf("could not read inode %d in directory: %v", entry.inode, err)
	}
	return &FileInfo{
		modTime: in.modifyTime,
		name:    entry.filename,
		size:    int64(in.size),
		isDir:   entry.fileType == dirFileTypeDirectory,
	}, nil
}

// SetLabel changes the label on the writable filesystem. Different file systems may have different
// length constraints.
func (fs *FileSystem) SetLabel(label string) error {
	fs.superblock.volumeLabel = label
	return fs.writeSuperblock()
}

// readInode read a single inode from disk
func (fs *FileSystem) readInode(inodeNumber uint32) (*inode, error) {
	// inode numbers are 1-based; 0 is never a valid inode
	if inodeNumber == 0 {
		return nil, fmt.Errorf("cannot read inode 0")
	}
	sb := fs.superblock
	inodeSize := sb.inodeSize
	inodesPerGroup := sb.inodesPerGroup
	// figure out which block group the inode is on
	bg := (inodeNumber - 1) / inodesPerGroup
	// read the group descriptor to find out the location of the inode table
	gd := fs.groupDescriptors.descriptors[bg]
	inodeTableBlock := gd.inodeTableLocation
	inodeBytes := make([]byte, inodeSize)
	// bytesStart is beginning byte for the inodeTableBlock
	byteStart := inodeTableBlock * uint64(sb.blockSize)
	// offsetInode is how many inodes in our inode is
	offsetInode := (inodeNumber - 1) % inodesPerGroup
	// offset is how many bytes in our inode is
	offset := offsetInode * uint32(inodeSize)
	// NOTE(review): this read does not add fs.start — looks like the inode table offset
	// assumes the filesystem begins at byte 0 of the file; confirm against Read()/Rm().
	read, err := fs.file.ReadAt(inodeBytes, int64(byteStart)+int64(offset))
	if err != nil {
		return nil, fmt.Errorf("failed to read inode %d from offset %d of block %d from block group %d: %v", inodeNumber, offset, inodeTableBlock, bg, err)
	}
	if read != int(inodeSize) {
		return nil, fmt.Errorf("read %d bytes for inode %d instead of inode size of %d", read, inodeNumber, inodeSize)
	}
	inode, err := inodeFromBytes(inodeBytes, sb, inodeNumber)
	if err != nil {
		return nil, fmt.Errorf("could not interpret inode data: %v", err)
	}
	// fill in symlink target if needed: a long symlink stores its target in data blocks
	// rather than inline in the inode, so it must be read separately
	if inode.fileType == fileTypeSymbolicLink && inode.linkTarget == "" {
		// read the symlink target
		extents, err := inode.extents.blocks(fs)
		if err != nil {
			return nil, fmt.Errorf("could not read extent tree for symlink inode %d: %v", inodeNumber, err)
		}
		b, err := fs.readFileBytes(extents, inode.size)
		if err != nil {
			return nil, fmt.Errorf("could not read symlink target for inode %d: %v", inodeNumber, err)
		}
		inode.linkTarget = string(b)
	}
	return inode, nil
}

// writeInode write a single inode to disk
func (fs *FileSystem) writeInode(i *inode) error {
	sb := fs.superblock
	inodeSize := sb.inodeSize
	inodesPerGroup := sb.inodesPerGroup
	// figure out which block group the inode is on
	bg := (i.number - 1) / inodesPerGroup
	// read the group descriptor to find out the location of the inode table
	gd := fs.groupDescriptors.descriptors[bg]
	inodeTableBlock := gd.inodeTableLocation
	// offsetInode is the index of this inode within its group's inode table
	offsetInode := (i.number - 1) % inodesPerGroup
	// byteStart is the beginning byte for the inodeTableBlock
	byteStart := inodeTableBlock * uint64(sb.blockSize)
	// offset is how many bytes into the inode table our inode is
	offset := int64(offsetInode) * int64(inodeSize)
	inodeBytes := i.toBytes(sb)
	// NOTE(review): like readInode, this does not add fs.start; confirm intended.
	wrote, err := fs.file.WriteAt(inodeBytes, int64(byteStart)+offset)
	if err != nil {
		return fmt.Errorf("failed to write inode %d at offset %d of block %d from block group %d: %v", i.number, offset, inodeTableBlock, bg, err)
	}
	if wrote != int(inodeSize) {
		return fmt.Errorf("wrote %d bytes for inode %d instead of inode size of %d", wrote, i.number, inodeSize)
	}
	return nil
}

// read directory entries for a given directory
func (fs *FileSystem) readDirectory(inodeNumber uint32) ([]*directoryEntry, error) {
	// read the inode for the directory
	in, err := fs.readInode(inodeNumber)
	if err != nil {
		return nil, fmt.Errorf("could not read inode %d for directory: %v", inodeNumber, err)
	}
	// convert the extent tree into a sorted list of extents
	extents, err := in.extents.blocks(fs)
	if err != nil {
		return nil, fmt.Errorf("unable to get blocks for inode %d: %w", in.number, err)
	}
	// read the contents of the file across all blocks
	b, err := fs.readFileBytes(extents, in.size)
	if err != nil {
		return nil, fmt.Errorf("error reading file bytes for inode %d: %v", inodeNumber, err)
	}

	var dirEntries []*directoryEntry
	// hashed (htree) directories store a tree root in the first block and hashed
	// leaf entries after it; linear directories are just a flat list of entries
	if in.flags.hashedDirectoryIndexes {
		treeRoot, err := parseDirectoryTreeRoot(b[:fs.superblock.blockSize], fs.superblock.features.largeDirectory)
		if err != nil {
			return nil, fmt.Errorf("failed to parse directory tree root: %v", err)
		}
		subDirEntries, err := parseDirEntriesHashed(b, treeRoot.depth, treeRoot, fs.superblock.blockSize, fs.superblock.features.metadataChecksums, in.number, in.nfsFileVersion, fs.superblock.checksumSeed)
		if err != nil {
			return nil, fmt.Errorf("failed to parse hashed directory entries: %v", err)
		}
		// include the dot and dotdot entries from treeRoot; they do not show up in the hashed entries
		dirEntries = []*directoryEntry{treeRoot.dotEntry, treeRoot.dotDotEntry}
		dirEntries = append(dirEntries, subDirEntries...)
	} else {
		// convert into directory entries
		dirEntries, err = parseDirEntriesLinear(b, fs.superblock.features.metadataChecksums, fs.superblock.blockSize, in.number, in.nfsFileVersion, fs.superblock.checksumSeed)
	}

	return dirEntries, err
}

// readFileBytes read all of the bytes for an individual file pointed at by a given inode
// normally not very useful, but helpful when reading an entire directory.
func (fs *FileSystem) readFileBytes(extents extents, filesize uint64) ([]byte, error) {
	// walk through each one, gobbling up the bytes
	b := make([]byte, 0, fs.superblock.blockSize)
	for i, e := range extents {
		start := e.startingBlock * uint64(fs.superblock.blockSize)
		count := uint64(e.count) * uint64(fs.superblock.blockSize)
		// the final extent may extend past the logical file size; clamp the read to filesize
		if uint64(len(b))+count > filesize {
			count = filesize - uint64(len(b))
		}
		b2 := make([]byte, count)
		// NOTE(review): no fs.start added here either — consistent with readInode; confirm.
		read, err := fs.file.ReadAt(b2, int64(start))
		if err != nil {
			return nil, fmt.Errorf("failed to read bytes for extent %d: %v", i, err)
		}
		if read != int(count) {
			return nil, fmt.Errorf("read %d bytes instead of %d for extent %d", read, count, i)
		}
		b = append(b, b2...)
		if uint64(len(b)) >= filesize {
			break
		}
	}
	return b, nil
}

// mkFile make a file with a given name in the given directory.
func (fs *FileSystem) mkFile(parent *Directory, name string) (*directoryEntry, error) {
	return fs.mkDirEntry(parent, name, false)
}

// readDirWithMkdir - walks down a directory tree to the last entry in p.
// For example, if p is /a/b/c, it will walk down to c.
// Expects c to be a directory.
// If each step in the tree does not exist, it will either make it if doMake is true, or return an error.
+func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, error) { + paths := splitPath(p) + + // walk down the directory tree until all paths have been walked or we cannot find something + // start with the root directory + var entries []*directoryEntry + currentDir := &Directory{ + directoryEntry: directoryEntry{ + inode: rootInode, + filename: "", + fileType: dirFileTypeDirectory, + }, + root: true, + } + entries, err := fs.readDirectory(rootInode) + if err != nil { + return nil, fmt.Errorf("failed to read directory %s", "/") + } + currentDir.entries = entries + for i, subp := range paths { + // do we have an entry whose name is the same as this name? + found := false + for _, e := range entries { + if e.filename != subp { + continue + } + if e.fileType != dirFileTypeDirectory { + return nil, fmt.Errorf("cannot create directory at %s since it is a file", "/"+strings.Join(paths[0:i+1], "/")) + } + // the filename matches, and it is a subdirectory, so we can break after saving the directory entry, which contains the inode + found = true + currentDir = &Directory{ + directoryEntry: *e, + } + break + } + + // if not, either make it, retrieve its cluster and entries, and loop; + // or error out + if !found { + if doMake { + var subdirEntry *directoryEntry + subdirEntry, err = fs.mkSubdir(currentDir, subp) + if err != nil { + return nil, fmt.Errorf("failed to create subdirectory %s", "/"+strings.Join(paths[0:i+1], "/")) + } + // save where we are to search next + currentDir = &Directory{ + directoryEntry: *subdirEntry, + } + } else { + return nil, fmt.Errorf("path %s not found", "/"+strings.Join(paths[0:i+1], "/")) + } + } + // get all of the entries in this directory + entries, err = fs.readDirectory(currentDir.inode) + if err != nil { + return nil, fmt.Errorf("failed to read directory %s", "/"+strings.Join(paths[0:i+1], "/")) + } + currentDir.entries = entries + } + // once we have made it here, looping is done; we have found the final entry + 
currentDir.entries = entries + return currentDir, nil +} + +// readBlock read a single block from disk +func (fs *FileSystem) readBlock(blockNumber uint64) ([]byte, error) { + sb := fs.superblock + // bytesStart is beginning byte for the inodeTableBlock + byteStart := blockNumber * uint64(sb.blockSize) + blockBytes := make([]byte, sb.blockSize) + read, err := fs.file.ReadAt(blockBytes, int64(byteStart)) + if err != nil { + return nil, fmt.Errorf("failed to read block %d: %v", blockNumber, err) + } + if read != int(sb.blockSize) { + return nil, fmt.Errorf("read %d bytes for block %d instead of size of %d", read, blockNumber, sb.blockSize) + } + return blockBytes, nil +} + +// recalculate blocksize based on the existing number of blocks +// - 0 <= blocks < 3MM : floppy - blocksize = 1024 +// - 3MM <= blocks < 512MM : small - blocksize = 1024 +// - 512MM <= blocks < 4*1024*1024MM : default - blocksize = +// - 4*1024*1024MM <= blocks < 16*1024*1024MM : big - blocksize = +// - 16*1024*1024MM <= blocks : huge - blocksize = +// +// the original code from e2fsprogs https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/misc/mke2fs.c +func recalculateBlocksize(numblocks, size int64) (sectorsPerBlock int, blocksize uint32, numBlocksAdjusted int64) { + var ( + million64 = int64(million) + sectorSize512 = uint32(SectorSize512) + ) + switch { + case 0 <= numblocks && numblocks < 3*million64: + sectorsPerBlock = 2 + blocksize = 2 * sectorSize512 + case 3*million64 <= numblocks && numblocks < 512*million64: + sectorsPerBlock = 2 + blocksize = 2 * sectorSize512 + case 512*million64 <= numblocks && numblocks < 4*1024*1024*million64: + sectorsPerBlock = 2 + blocksize = 2 * sectorSize512 + case 4*1024*1024*million64 <= numblocks && numblocks < 16*1024*1024*million64: + sectorsPerBlock = 2 + blocksize = 2 * sectorSize512 + case numblocks > 16*1024*1024*million64: + sectorsPerBlock = 2 + blocksize = 2 * sectorSize512 + } + return sectorsPerBlock, blocksize, size / int64(blocksize) 
+} + +// mkSubdir make a subdirectory of a given name inside the parent +// 1- allocate a single data block for the directory +// 2- create an inode in the inode table pointing to that data block +// 3- mark the inode in the inode bitmap +// 4- mark the data block in the data block bitmap +// 5- create a directory entry in the parent directory data blocks +func (fs *FileSystem) mkSubdir(parent *Directory, name string) (*directoryEntry, error) { + return fs.mkDirEntry(parent, name, true) +} + +func (fs *FileSystem) mkDirEntry(parent *Directory, name string, isDir bool) (*directoryEntry, error) { + // still to do: + // - write directory entry in parent + // - write inode to disk + + // create an inode + inodeNumber, err := fs.allocateInode(parent.inode) + if err != nil { + return nil, fmt.Errorf("could not allocate inode for file %s: %w", name, err) + } + // get extents for the file - prefer in the same block group as the inode, if possible + newExtents, err := fs.allocateExtents(1, nil) + if err != nil { + return nil, fmt.Errorf("could not allocate disk space for file %s: %w", name, err) + } + extentTreeParsed, err := extendExtentTree(nil, newExtents, fs, nil) + if err != nil { + return nil, fmt.Errorf("could not convert extents into tree: %w", err) + } + // normally, after getting a tree from extents, you would need to then allocate all of the blocks + // in the extent tree - leafs and intermediate. 
However, because we are allocating a new directory + // with a single extent, we *know* it can fit in the inode itself (which has a max of 4), so no need + + // create a directory entry for the file + deFileType := dirFileTypeRegular + fileType := fileTypeRegularFile + var contentSize uint64 + if isDir { + deFileType = dirFileTypeDirectory + fileType = fileTypeDirectory + contentSize = uint64(fs.superblock.blockSize) + } + de := directoryEntry{ + inode: inodeNumber, + filename: name, + fileType: deFileType, + } + parent.entries = append(parent.entries, &de) + // write the parent out to disk + bytesPerBlock := fs.superblock.blockSize + parentDirBytes := parent.toBytes(bytesPerBlock, directoryChecksumAppender(fs.superblock.checksumSeed, parent.inode, 0)) + // check if parent has increased in size beyond allocated blocks + parentInode, err := fs.readInode(parent.inode) + if err != nil { + return nil, fmt.Errorf("could not read inode %d of parent directory: %w", parent.inode, err) + } + + // write the directory entry in the parent + // figure out which block it goes into, and possibly rebalance the directory entries hash tree + parentExtents, err := parentInode.extents.blocks(fs) + if err != nil { + return nil, fmt.Errorf("could not read parent extents for directory: %w", err) + } + dirFile := &File{ + inode: parentInode, + directoryEntry: &directoryEntry{ + inode: parent.inode, + filename: name, + fileType: dirFileTypeDirectory, + }, + filesystem: fs, + isReadWrite: true, + isAppend: true, + offset: 0, + extents: parentExtents, + } + wrote, err := dirFile.Write(parentDirBytes) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("unable to write new directory: %w", err) + } + if wrote != len(parentDirBytes) { + return nil, fmt.Errorf("wrote only %d bytes instead of expected %d for new directory", wrote, len(parentDirBytes)) + } + + // write the inode for the new entry out + now := time.Now() + in := inode{ + number: inodeNumber, + permissionsGroup: 
parentInode.permissionsGroup, + permissionsOwner: parentInode.permissionsOwner, + permissionsOther: parentInode.permissionsOther, + fileType: fileType, + owner: parentInode.owner, + group: parentInode.group, + size: contentSize, + hardLinks: 2, + blocks: newExtents.blockCount(), + flags: &inodeFlags{}, + nfsFileVersion: 0, + version: 0, + inodeSize: parentInode.inodeSize, + deletionTime: 0, + accessTime: now, + changeTime: now, + createTime: now, + modifyTime: now, + extendedAttributeBlock: 0, + project: 0, + extents: extentTreeParsed, + } + // write the inode to disk + if err := fs.writeInode(&in); err != nil { + return nil, fmt.Errorf("could not write inode for new directory: %w", err) + } + // if a directory, put entries for . and .. in the first block for the new directory + if isDir { + initialEntries := []*directoryEntry{ + { + inode: inodeNumber, + filename: ".", + fileType: dirFileTypeDirectory, + }, + { + inode: parent.inode, + filename: "..", + fileType: dirFileTypeDirectory, + }, + } + newDir := Directory{ + directoryEntry: de, + root: false, + entries: initialEntries, + } + dirBytes := newDir.toBytes(fs.superblock.blockSize, directoryChecksumAppender(fs.superblock.checksumSeed, inodeNumber, 0)) + // write the bytes out to disk + dirFile = &File{ + inode: &in, + directoryEntry: &directoryEntry{ + inode: inodeNumber, + filename: name, + fileType: dirFileTypeDirectory, + }, + filesystem: fs, + isReadWrite: true, + isAppend: true, + offset: 0, + extents: *newExtents, + } + wrote, err := dirFile.Write(dirBytes) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("unable to write new directory: %w", err) + } + if wrote != len(dirBytes) { + return nil, fmt.Errorf("wrote only %d bytes instead of expected %d for new entry", wrote, len(dirBytes)) + } + } + + // return + return &de, nil +} + +// allocateInode allocate a single inode +// passed the parent, so it can know where to allocate it +// logic: +// - parent is 0 : root inode, will allocate at 2 +// 
- parent is 2 : child of root, will try to spread out +// - else : try to collocate with parent, if possible +func (fs *FileSystem) allocateInode(parent uint32) (uint32, error) { + var ( + inodeNumber = -1 + ) + if parent == 0 { + inodeNumber = 2 + } + // load the inode bitmap + var ( + bg int + gd groupDescriptor + ) + + for _, gd = range fs.groupDescriptors.descriptors { + if inodeNumber != -1 { + break + } + bg := int(gd.number) + bm, err := fs.readInodeBitmap(bg) + if err != nil { + return 0, fmt.Errorf("could not read inode bitmap: %w", err) + } + // get first free inode + inodeNumber = bm.FirstFree(0) + // if we found a + if inodeNumber == -1 { + continue + } + // set it as marked + if err := bm.Set(inodeNumber); err != nil { + return 0, fmt.Errorf("could not set inode bitmap: %w", err) + } + // write the inode bitmap bytes + if err := fs.writeInodeBitmap(bm, bg); err != nil { + return 0, fmt.Errorf("could not write inode bitmap: %w", err) + } + } + if inodeNumber == -1 { + return 0, errors.New("no free inodes available") + } + + // reduce number of free inodes in that descriptor in the group descriptor table + gd.freeInodes-- + + // get the group descriptor as bytes + gdBytes := gd.toBytes(fs.superblock.gdtChecksumType(), fs.superblock.uuid.ID()) + + // write the group descriptor bytes + // gdt starts in block 1 of any redundant copies, specifically in BG 0 + gdtBlock := 1 + blockByteLocation := gdtBlock * int(fs.superblock.blockSize) + gdOffset := fs.start + int64(blockByteLocation) + int64(bg)*int64(fs.superblock.groupDescriptorSize) + wrote, err := fs.file.WriteAt(gdBytes, gdOffset) + if err != nil { + return 0, fmt.Errorf("unable to write group descriptor bytes for blockgroup %d: %v", bg, err) + } + if wrote != len(gdBytes) { + return 0, fmt.Errorf("wrote only %d bytes instead of expected %d for group descriptor of block group %d", wrote, len(gdBytes), bg) + } + + return uint32(inodeNumber), nil +} + +// allocateExtents allocate the data blocks in 
extents that are +// to be used for a file of a given size +// arguments are file size in bytes and existing extents +// if previous is nil, then we are not (re)sizing an existing file but creating a new one +// returns the extents to be used in order +func (fs *FileSystem) allocateExtents(size uint64, previous *extents) (*extents, error) { + // 1- calculate how many blocks are needed + required := size / uint64(fs.superblock.blockSize) + remainder := size % uint64(fs.superblock.blockSize) + if remainder > 0 { + required++ + } + // 2- see how many blocks already are allocated + var allocated uint64 + if previous != nil { + allocated = previous.blockCount() + } + // 3- if needed, allocate new blocks in extents + extraBlockCount := required - allocated + // if we have enough, do not add anything + if extraBlockCount <= 0 { + return previous, nil + } + + // if there are not enough blocks left on the filesystem, return an error + if fs.superblock.freeBlocks < extraBlockCount { + return nil, fmt.Errorf("only %d blocks free, requires additional %d", fs.superblock.freeBlocks, extraBlockCount) + } + + // now we need to look for as many contiguous blocks as possible + // first calculate the minimum number of extents needed + + // if all of the extents, except possibly the last, are maximum size, then we need minExtents extents + // we loop through, trying to allocate an extent as large as our remaining blocks or maxBlocksPerExtent, + // whichever is smaller + blockGroupCount := fs.blockGroups + // TODO: instead of starting with BG 0, should start with BG where the inode for this file/dir is located + var ( + newExtents []extent + datablockBitmaps = map[int]*util.Bitmap{} + blocksPerGroup = fs.superblock.blocksPerGroup + ) + + var i int64 + for i = 0; i < blockGroupCount && allocated < extraBlockCount; i++ { + // keep track if we allocated anything in this blockgroup + // 1- read the GDT for this blockgroup to find the location of the block bitmap + // and total free blocks 
+ // 2- read the block bitmap from disk + // 3- find the maximum contiguous space available + bs, err := fs.readBlockBitmap(int(i)) + if err != nil { + return nil, fmt.Errorf("could not read block bitmap for block group %d: %v", i, err) + } + // now find our unused blocks and how many there are in a row as potential extents + if extraBlockCount > maxUint16 { + return nil, fmt.Errorf("cannot allocate more than %d blocks in a single extent", maxUint16) + } + // get the list of free blocks + blockList := bs.FreeList() + + // create possible extents by size + // Step 3: Group contiguous blocks into extents + var extents []extent + for _, freeBlock := range blockList { + start, length := freeBlock.Position, freeBlock.Count + for length > 0 { + extentLength := min(length, int(maxBlocksPerExtent)) + extents = append(extents, extent{startingBlock: uint64(start) + uint64(i)*uint64(blocksPerGroup), count: uint16(extentLength)}) + start += extentLength + length -= extentLength + } + } + + // sort in descending order + sort.Slice(extents, func(i, j int) bool { + return extents[i].count > extents[j].count + }) + + var allocatedBlocks uint64 + for _, ext := range extents { + if extraBlockCount <= 0 { + break + } + extentToAdd := ext + if uint64(ext.count) >= extraBlockCount { + extentToAdd = extent{startingBlock: ext.startingBlock, count: uint16(extraBlockCount)} + } + newExtents = append(newExtents, extentToAdd) + allocatedBlocks += uint64(extentToAdd.count) + extraBlockCount -= uint64(extentToAdd.count) + // set the marked blocks in the bitmap, and save the bitmap + for block := extentToAdd.startingBlock; block < extentToAdd.startingBlock+uint64(extentToAdd.count); block++ { + // determine what block group this block is in, and read the bitmap for that blockgroup + // the extent lists the absolute block number, but the bitmap is relative to the block group + blockInGroup := block - uint64(i)*uint64(blocksPerGroup) + if err := bs.Set(int(blockInGroup)); err != nil { + return 
nil, fmt.Errorf("could not clear block bitmap for block %d: %v", i, err) + } + } + + // do *not* write the bitmap back yet, as we do not yet know if we will be able to fulfill the entire request. + // instead save it for later + datablockBitmaps[int(i)] = bs + } + } + if extraBlockCount > 0 { + return nil, fmt.Errorf("could not allocate %d blocks", extraBlockCount) + } + + // write the block bitmaps back to disk + for bg, bs := range datablockBitmaps { + if err := fs.writeBlockBitmap(bs, bg); err != nil { + return nil, fmt.Errorf("could not write block bitmap for block group %d: %v", bg, err) + } + } + + // need to update the total blocks used/free in superblock + fs.superblock.freeBlocks -= allocated + // update the blockBitmapChecksum for any updated block groups in GDT + // write updated superblock and GDT to disk + if err := fs.writeSuperblock(); err != nil { + return nil, fmt.Errorf("could not write superblock: %w", err) + } + // write backup copies + var exten extents = newExtents + return &exten, nil +} + +// readInodeBitmap read the inode bitmap off the disk. +// This would be more efficient if we just read one group descriptor's bitmap +// but for now we are about functionality, not efficiency, so it will read the whole thing. 
+func (fs *FileSystem) readInodeBitmap(group int) (*util.Bitmap, error) { + if group >= len(fs.groupDescriptors.descriptors) { + return nil, fmt.Errorf("block group %d does not exist", group) + } + gd := fs.groupDescriptors.descriptors[group] + bitmapLocation := gd.inodeBitmapLocation + bitmapByteCount := fs.superblock.inodesPerGroup / 8 + b := make([]byte, bitmapByteCount) + offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start)) + read, err := fs.file.ReadAt(b, offset) + if err != nil { + return nil, fmt.Errorf("unable to read inode bitmap for blockgroup %d: %w", gd.number, err) + } + if read != int(bitmapByteCount) { + return nil, fmt.Errorf("Read %d bytes instead of expected %d for inode bitmap of block group %d", read, bitmapByteCount, gd.number) + } + // only take bytes corresponding to the number of inodes per group + + // create a bitmap + bs := util.NewBitmap(int(fs.superblock.blockSize) * len(fs.groupDescriptors.descriptors)) + bs.FromBytes(b) + return bs, nil +} + +// writeInodeBitmap write the inode bitmap to the disk. 
+func (fs *FileSystem) writeInodeBitmap(bm *util.Bitmap, group int) error { + if group >= len(fs.groupDescriptors.descriptors) { + return fmt.Errorf("block group %d does not exist", group) + } + b := bm.ToBytes() + gd := fs.groupDescriptors.descriptors[group] + bitmapByteCount := fs.superblock.inodesPerGroup / 8 + bitmapLocation := gd.inodeBitmapLocation + offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start)) + wrote, err := fs.file.WriteAt(b, offset) + if err != nil { + return fmt.Errorf("unable to write inode bitmap for blockgroup %d: %w", gd.number, err) + } + if wrote != int(bitmapByteCount) { + return fmt.Errorf("wrote %d bytes instead of expected %d for inode bitmap of block group %d", wrote, bitmapByteCount, gd.number) + } + + return nil +} + +func (fs *FileSystem) readBlockBitmap(group int) (*util.Bitmap, error) { + if group >= len(fs.groupDescriptors.descriptors) { + return nil, fmt.Errorf("block group %d does not exist", group) + } + gd := fs.groupDescriptors.descriptors[group] + bitmapLocation := gd.blockBitmapLocation + b := make([]byte, fs.superblock.blockSize) + offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start)) + read, err := fs.file.ReadAt(b, offset) + if err != nil { + return nil, fmt.Errorf("unable to read block bitmap for blockgroup %d: %w", gd.number, err) + } + if read != int(fs.superblock.blockSize) { + return nil, fmt.Errorf("Read %d bytes instead of expected %d for block bitmap of block group %d", read, fs.superblock.blockSize, gd.number) + } + // create a bitmap + bs := util.NewBitmap(int(fs.superblock.blockSize) * len(fs.groupDescriptors.descriptors)) + bs.FromBytes(b) + return bs, nil +} + +// writeBlockBitmap write the inode bitmap to the disk. 
+func (fs *FileSystem) writeBlockBitmap(bm *util.Bitmap, group int) error { + if group >= len(fs.groupDescriptors.descriptors) { + return fmt.Errorf("block group %d does not exist", group) + } + b := bm.ToBytes() + gd := fs.groupDescriptors.descriptors[group] + bitmapLocation := gd.blockBitmapLocation + offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start)) + wrote, err := fs.file.WriteAt(b, offset) + if err != nil { + return fmt.Errorf("unable to write block bitmap for blockgroup %d: %w", gd.number, err) + } + if wrote != int(fs.superblock.blockSize) { + return fmt.Errorf("wrote %d bytes instead of expected %d for block bitmap of block group %d", wrote, fs.superblock.blockSize, gd.number) + } + + return nil +} + +func (fs *FileSystem) writeSuperblock() error { + superblockBytes, err := fs.superblock.toBytes() + if err != nil { + return fmt.Errorf("could not convert superblock to bytes: %v", err) + } + _, err = fs.file.WriteAt(superblockBytes, fs.start+int64(BootSectorSize)) + return err +} + +func blockGroupForInode(inodeNumber int, inodesPerGroup uint32) int { + return (inodeNumber - 1) / int(inodesPerGroup) +} +func blockGroupForBlock(blockNumber int, blocksPerGroup uint32) int { + return (blockNumber - 1) / int(blocksPerGroup) +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.md b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.md new file mode 100644 index 00000000000..0cfa7191f73 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/ext4.md @@ -0,0 +1,335 @@ +# ext4 +This file describes the layout on disk of ext4. It is a living document and probably will be deleted rather than committed to git. + +The primary reference document is [here](https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout). 
+ +Also useful are: + +* https://blogs.oracle.com/linux/post/understanding-ext4-disk-layout-part-2 +* https://www.sans.org/blog/understanding-ext4-part-6-directories/ - blog series +* https://digital-forensics.sans.org/blog/2017/06/07/understanding-ext4-part-6-directories +* https://metebalci.com/blog/a-minimum-complete-tutorial-of-linux-ext4-file-system/ + +## Concepts + +* Sector: a section of 512 bytes +* Block: a contiguous group of sectors. Block size usually is either 4K (4096 bytes) or 1K (1024 bytes), i.e. 8 sectors or 2 sectors. Block size minimum is 1KB (2 sectors), max is 64KB (128 sectors). Each block is associated with exactly one file. A file may contain more than one block - e.g. if a file is larger than the size of a single block - but each block belongs to exactly one file. +* inode: metadata about a file or directory. Each inode contains metadata about exactly one file. The number of inodes in a system is identical to the number of blocks for 32-bit, or far fewer for 64-bit. +* Block group: a contiguous group of blocks. Each block group is (`8*block_size_in_bytes`) blocks. So if block size is 4K, or 4096 bytes, then a block group is `8*4096` = 32,768 blocks, each of size 4096 bytes, for a block group of 128MB. If block size is 1K, a block group is 8192 blocks, or 8MB. +* 64-bit feature: ext4 filesystems normally uses 32-bit, which means the maximum blocks per filesystem is 2^32. If the 64-bit feature is enabled, then the maximum blocks per filesystem is 2^64. +* Superblock: A block that contains information about the entire filesystem. Exists in block group 0 and sometimes is backed up to other block groups. The superblock contains information about the filesystem as a whole: inode size, block size, last mount time, etc. +* Block Group Descriptor: Block Group Descriptors contain information about each block group: start block, end block, inodes, etc. One Descriptor per Group. 
But it is stored next to the Superblock (and backups), not with each Group. +* Extent: an extent is a contiguous group of blocks. Extents are used to store files. Extents are mapped beginning with the inode, and provide the way of getting from an inode to the blocks that contain the file's data. + + +### Block Group + +Each block group is built in the following order. There is a distinction between Group 0 - the first one +in the filesystem - and all others. + +Block groups come in one of several types. It isn't necessary to list all of them here. The key elements are as follows. + +Block 0: + +1. Padding: 1024 bytes, used for boot sector + +Block 0 (above 1024 bytes, if blocksize >1024) or Block 1; all backup blocks: + +2. Superblock: One block +3. Group Descriptors: Many blocks +4. Reserved GDT Blocks: Many blocks, reserved in case we need to expand to more Group Descriptors in the future + +All blocks: + +5. Data block bitmap: 1 block. One bit per block in the block group. Set to 1 if a data block is in use, 0 if not. +6. inode bitmap: 1 block. One bit per inode in the block group. Set to 1 if an inode is in use, 0 if not. +7. inode table: many blocks. Calculated by `(inodes_per_group)*(size_of_inode)`. Remember that `inodes_per_group` = `blocks_per_group` = `8*block_size_in_bytes`. The original `size_of_inode` in ext2 was 128 bytes. In ext4 it uses 156 bytes, but is stored in 256 bytes of space, so `inode_size_in_bytes` = 256 bytes. +8. Data blocks: all of the rest of the blocks in the block group + +The variant on the above is with Flexible Block Groups. If flexbg is enabled, then block groups are grouped together, normally +groups of 16 (but the actual number is in the superblock). The data block bitmap, inode bitmap and inode table are +in the first block group for each flexible block group. 
+ +This means you can have all sorts of combinations: + +* block that is both first in a block group (contains block bitmap, inode bitmap, inode table) and superblock/backup (contains superblock, GDT, reserved GDT blocks) +* block that is first in a block group (block bitmap, inode bitmap, inode table) but not first in a block group or Flex BG +* block that is superblock/backup (superblock, GDT, reserved GDT blocks) but not first in a block group or Flex BG +* neither of the above (contains just data blocks) + +Summary: block bitmap, inode bitmap and inode table are in the first block in a blockgroup or Flex BG, which is a consistent +number. Superblock backups are in specific blocks, calculated by being a block number that is a power of 3, 5 or 7. + +## How to + +Different actions. These all will be replaced by actual code. Things we need to be able to do: + +* walk the tree to a particular directory or file +* inode to data blocks +* read directory entries +* create a new directory entry +* read contents of a file +* write contents to a file + +### Walk the Tree + +In order to get to any particular file or directory in the ext4 filesystem, you need to "walk the tree". +For example, say you want to read the contents of directory `/usr/local/bin/`. + +1. Find the inode of the root directory in the inode table. This **always** is inode 2. +1. Read inode of the root directory to get the data blocks that contain the contents of the root directory. See [inode to data blocks](#inode-to-data-blocks). +1. Read the directory entries in the data blocks to get the names of the files and directories in root. This can be linear or hash. + * linear: read sequentially until you find the one whose name matches the desired subdirectory, for example `usr` + * hash: hash the name and use that to get the correct location +1. Using the matched directory entry, get the inode number for that subdirectory. +1. Use the superblock to read how many inodes are in each block group, e.g. 
8144 +1. Calculate which block group contains the inode you are looking for. Using the above example, 0-8143 are in group 0, 8144-16287 are in group 1, etc. +1. Read the inode of that subdirectory in the inode table of the given block group to get the data blocks that contain the contents of that directory. +1. Repeat until you have read the data blocks for the desired entry. + +### Inode to Data Blocks + +Start with the inode + +1. Read the inode +1. Read the `i_block` value, 60 bytes at location 0x28 (= 40) +1. The first 12 bytes are an extent header: + * magic number 0xf30a (little endian) - 2 bytes + * number of entries following the header - 2 bytes - in the inode, always 1, 2, 3, or 4 + * maximum number of entries that could follow the header - 2 bytes - in the inode, always 4 + * depth of this node in the extent tree, where 0 = leaf, parent to that is 1, etc. - 2 bytes + * generation (unused) - 4 bytes +1. Read the entries that follow. + +If the data inside the inode is a leaf node (header depth = 0), then the entries will be leaf entries of 12 bytes: + +* first block in the file that this extent covers - 4 bytes +* number of blocks in this extent - 2 bytes - If the value of this field is <= 32768, the extent is initialized. If the value of the field is > 32768, the extent is uninitialized and the actual extent length is ee_len - 32768. Therefore, the maximum length of a initialized extent is 32768 blocks, and the maximum length of an uninitialized extent is 32767. +* upper 16 bits of the block location - 2 bytes +* lower 32 bits of the block location - 4 bytes + +For example, if a file has 1,000 blocks, and a particular extent entry points to blocks 100-299 of the file, and it starts +at filesystem block 10000, then the entry will be: + +* 100 (4 bytes) +* 200 (2 bytes) - is this correct? 
This would indicate uninitialized +* 0 (2 bytes) +* 10000 (4 bytes) + +If the data inside the inode is an internal node (header depth > 0), then the entries will be internal entries of 12 bytes: + +* first file block that this extent and all its children cover - 4 bytes +* lower 32 bits of the block number os the extent node on the next lower level - 4 bytes +* upper 16 bits of the block number of the extent node on the next lower level - 2 bytes +* unused - 2 bytes + +For example, if a file has 10,000 blocks, covered in 15 extents, then there will be 15 level 0 extents, and 1 level 1 extent, +and the 15 extents are stored in filesystem block 20000. + +The lower level 0 extent will look like our leaf node example above. +The upper level 1 extent will look like: + +* 0 (4 bytes) - because this starts from file block 0 +* 20000 (4 bytes) - the block number of the extent node on the next lower level +* 0 (2 bytes) - because lower 4 bytes were enough to cover + +You can find all of the blocks simply by looking at the root of the extent tree in the inode. + +* If the extents for the file are 4 or fewer, then the extent tree is stored in the inode itself. +* If the extents for the file are more than 4, but enough to fit the extents in 1-4 blocks, then: + * level 0 extents are stored in a single separate block + * level 1 extents are stored in the inode, with up to 4 entries pointing to the level 0 extents blocks +* If the extents for the file are more than fit in 4 blocks, then: + * level 0 extents are stored in as many blocks as needed + * level 1 extents are stored in other blocks pointing to level 0 extent blocks + * level 2 extents - up to 4 - are stored in the inode + +Each of these is repeated upwards. The maximum at the top of the tree is 4, the maximum in each block is `(blocksize-12)/12`. +Because: + +- each block of extent nodes needs a header of 12 bytes +- each extent node is 12 bytes + +### Read Directory Entries +To read directory entries + +1. 
Walk the tree until you find the inode for the directory you want. +2. Read the data blocks pointed to by that inode, see [inode to data blocks](#inode-to-data-blocks). +3. Interpret the data blocks. + +The directory itself is just a single "file". It has an inode that indicates the file "length", which is the number of bytes that the listing takes up. + +There are two types of directories: Classic and Hash Tree. Classic are just linear, unsorted, unordered lists of files. They work fine for shorter lists, but large directories can be slow to traverse if they grow too large. Once the contents of the directory "file" will be larger than a single block, ext4 switches it to a Hash Tree Directory Entry. + +Which directory type it is - classical linear or hash tree - does not affect the inode, for which it is just a file, but the contents of the directory entry "file". You can tell if it is linear or hash tree by checking the inode flag `EXT4_INDEX_FL`. If it is set (i.e. `& 0x1000`), then it is a hash tree. + +#### Classic Directory Entry +Each directory entry is at most 263 bytes long. They are arranged in sequential order in the file. The contents are: + +* first four bytes are a `uint32` giving the inode number +* next 2 bytes give the length of the directory entry (max 263) +* next 1 byte gives the length of the file name (which could be calculated from the directory entry length...) +* next 1 byte gives type: unknown, file, directory, char device, block device, FIFO, socket, symlink +* next (up to 255) bytes contain chars with the file or directory name + +The above is for the second version of ext4 directory entry (`ext4_dir_entry_2`). The slightly older version (`ext4_dir_entry`) is similar, except it does not give the file type, which in any case is in the inode. Instead it uses 2 bytes for the file name length. 
+ +#### Hash Tree Directory Entry +Entries in the block are structured as follows: + +* `.` and `..` are the first two entries, and are classic `ext4_dir_entry_2` +* Look in byte `0x1c` to find the hash algorithm +* take the desired file/subdirectory name (just the `basename`) and hash it, see [Calculating the hash value][Calculating the hash value] +* look in the root directory entry in the hashmap to find the relative block number. Note that the block number is relative to the block in the directory, not the filesystem or block group. +* Next step depends on the hash tree depth: + * Depth = 0: read directory entry from the given block. + * Depth > 0: use the block as another lookup table, repeating the steps above, until we come to the depth. +* Once we have the final leaf block given by the hash table, we just read the block sequentially; it will be full of classical directory entries linearly. + +When reading the hashmap, it may not match precisely. Instead, it will fit within a range. The hashmap is sorted by `>=` to `<`. So if the table has entries as follows: + +| Hash | Block | +| -------|-------| +| 0 | 1 | +| 100 | 25 | +| 300 | 16 | + +Then: + +* all hash values from `0`-`99` will be in block `1` +* all hash values from `100-299` will be in block `25` +* all hash values from `300` to infinite will be in block `16` + +##### Calculating the hash value + +The hashing uses one of several algorithms. Most commonly, it is Half MD4. + +MD4 gives a digest length of 128 bits = 16 bytes. + +The "half md4" algorithm is given by the transformation code +[here](https://elixir.bootlin.com/linux/v4.6/source/lib/halfmd4.c#L26). The result +of it is 4 bytes. Those 4 bytes are the input to the hash. + +### Create a Directory Entry + +To create a directory, you need to go through the following steps: + +1. "Walk the tree" to find the parent directory. E.g. if you are creating `/usr/local/foo`, then you need to walk the tree to get to the directory "file" for `/usr/local`. 
If the parent directory is just the root `/`, e.g. you are creating `/foo`, then you use the root directory, whose inode always is `2`. +2. Determine if the parent directory is classical linear or hash tree, by checking the flag `EXT4_INDEX_FL` in the parent directory's inode. + * if hash: + 1. find a block in the "directory" file with space to add a linear entry + 1. create and add the entry + 1. calculate the hash of the filename + 1. add the `hash:block_number` entry into the tree + 1. rebalance if needed + * if linear, create the entry: + * if adding one will not exceed the size for linear, write it and done + * if adding one will exceed the size for linear, convert to hash, then write it + +#### Hash Tree + +1. Calculate the hash of the new directory entry name +2. Determine which block in the parent directory "file" the new entry should live, based on the hash table. +3. Find the block. +4. Add a classical linear entry at the end of it. +5. Update the inode for the parent directory with the new file size. + +If there is no room at the end of the block, you need to rebalance the hash tree. See below. + +#### Classical Linear + +1. Find the last block in the parent directory "file" + * if there is no room for another entry, extend the file size by another block, and update the inode for the file with the block map +2. Add a classical linear directory entry at the end of it. +3. Update the inode for the parent directory with the new file size, if any. E.g. if the entry fit within padding, there is no change in size. + +If this entry will cause the directory "file" to extend beyond a single block, convert to a hash tree. See below. + +### Rebalance Hash Tree + +Rebalancing the hash tree is rebalancing a btree, where the keys are the hash values. +You only ever need to rebalance when you add or remove an entry. + +#### Adding an entry + +When adding an entry, you only ever need to rebalance the node to which you add it, and parents up to the root. + +1. 
Calculate the hash of the entry +1. Determine the leaf node into which it should go +1. If the leaf node has less than the maximum number of elements, add it and done +1. If the lead node has the maximum number of elements: + 1. Add the new node in the right place + 1. Find the median + 1. Move the median up to the parent node + 1. If necessary, rebalance the parent node + +#### Removing an entry + +When removing an entry, you only ever need to rebalance the node from which you remove it, and parents up to the root. + +1. Calculate the hash of the entry +1. Determine the leaf node in which it exists +1. If the leaf node has less than the maximum number of elements, add it and done +1. If the lead node has the maximum number of elements: + 1. Add the new node in the right place + 1. Find the median + 1. Move the median up to the parent node + 1. If necessary, rebalance the parent node + +### Convert Classical Linear Directory Entries to Hash Tree + +The conversion usually happens when a single entry will exceed the capacity of a single block. + +1. Switch the flag in the inode to hash-tree +1. Calculate the hash of each entry +1. Create 2 new blocks: + * 1 for the bottom half of the entries + * 1 for the top half of the entries +1. Move the bottom half of the entries into the bottom block +1. Move the top half of the entries into the top block +1. Zero out the current single file block, which previously had the classic linear directory entries +1. Write the header into the tree block, with the 0-hash-value pointing to the bottom block +1. Write one entry after the header, for the lowest hash value of the upper block, pointing to the upper block + +### Read File Contents + +1. Walk the tree until you find the inode for the file you want. +1. Find the data blocks for that inode, see [inode to data blocks](#inode-to-data-blocks). +1. Interpret the data blocks. + +### Create File + +1. Walk the tree until you find the inode for the parent directory. +1. 
Find a free inode using the inode bitmap. +1. Find a free block using the block bitmap. +1. Create the inode for the new file in the inode table. Be sure to update all the dependencies: + * inode bitmap + * inode table + * inode count in the block group table + * inode count in the superblock +1. Reserve a data block for the new file in the block group table. Be sure to update all the dependencies: + * block bitmap + * block count in the block group table + * block count in the superblock +1. Create the file entry in the parent directory. Depends on if this is classic linear directory or hash tree directory. Note that if it is classic linear, calculate the new size before writing the entry. If it is bigger than a single block, convert to hash tree. TODO: is this the right boundary, single block? + * Classic linear directory: + 1. Find the last block in the parent directory "file" + 1. Add a classical linear directory entry at the end of it + 1. Update the inode for the parent directory with the new file size + * Hash tree directory: + 1. Calculate the hash of the new directory entry name + 1. Determine which block in the parent directory "file" the new entry should live, based on the hash table + 1. Find the block + 1. Add a classical linear entry at the end of it + 1. Update the inode for the parent directory with the new file size + + +### Write File Contents + +1. Walk the tree until you find the inode for the file you want. +1. Find the data blocks for that inode, see [inode to data blocks](#inode-to-data-blocks). +1. Write the data to the data blocks. +1. If the data written exceeds the end of the last block, reserve a new block, update the inode extent tree, and write the data to the new block. +1. Update the inode with the filesize +1. Update the block group table with the used blocks +1. 
Update the superblock with the used blocks diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/extent.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/extent.go new file mode 100644 index 00000000000..e5d456e0068 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/extent.go @@ -0,0 +1,733 @@ +package ext4 + +import ( + "encoding/binary" + "fmt" + "sort" +) + +const ( + extentTreeHeaderLength int = 12 + extentTreeEntryLength int = 12 + extentHeaderSignature uint16 = 0xf30a + extentTreeMaxDepth int = 5 +) + +// extens a structure holding multiple extents +type extents []extent + +// extent a structure with information about a single contiguous run of blocks containing file data +type extent struct { + // fileBlock block number relative to the file. E.g. if the file is composed of 5 blocks, this could be 0-4 + fileBlock uint32 + // startingBlock the first block on disk that contains the data in this extent. E.g. if the file is made up of data from blocks 100-104 on the disk, this would be 100 + startingBlock uint64 + // count how many contiguous blocks are covered by this extent + count uint16 +} + +// equal if 2 extents are equal +// +//nolint:unused // useful function for future +func (e *extent) equal(a *extent) bool { + if (e == nil && a != nil) || (a == nil && e != nil) { + return false + } + if e == nil && a == nil { + return true + } + return *e == *a +} + +// blockCount how many blocks are covered in the extents +// +//nolint:unused // useful function for future +func (e extents) blockCount() uint64 { + var count uint64 + for _, ext := range e { + count += uint64(ext.count) + } + return count +} + +// extentBlockFinder provides a way of finding the blocks on disk that represent the block range of a given file. +// Arguments are the starting and ending blocks in the file. Returns a slice of blocks to read on disk. +// These blocks are in order. 
For example, if you ask to read file blocks starting at 20 for a count of 25, then you might +// get a single fileToBlocks{block: 100, count: 25} if the file is contiguous on disk. Or you might get +// fileToBlocks{block: 100, count: 10}, fileToBlocks{block: 200, count: 15} if the file is fragmented on disk. +// The slice should be read in order. +type extentBlockFinder interface { + // findBlocks find the actual blocks for a range in the file, given the start block in the file and how many blocks + findBlocks(start, count uint64, fs *FileSystem) ([]uint64, error) + // blocks get all of the blocks for a file, in sequential order, essentially unravels the tree into a slice of extents + blocks(fs *FileSystem) (extents, error) + // toBytes convert this extentBlockFinder to bytes to be stored in a block or inode + toBytes() []byte + getDepth() uint16 + getMax() uint16 + getBlockSize() uint32 + getFileBlock() uint32 + getCount() uint32 +} + +var ( + _ extentBlockFinder = &extentInternalNode{} + _ extentBlockFinder = &extentLeafNode{} +) + +// extentNodeHeader represents the header of an extent node +type extentNodeHeader struct { + depth uint16 // the depth of tree below here; for leaf nodes, will be 0 + entries uint16 // number of entries + max uint16 // maximum number of entries allowed at this level + blockSize uint32 // block size for this tree +} + +func (e extentNodeHeader) toBytes() []byte { + b := make([]byte, 12) + binary.LittleEndian.PutUint16(b[0:2], extentHeaderSignature) + binary.LittleEndian.PutUint16(b[2:4], e.entries) + binary.LittleEndian.PutUint16(b[4:6], e.max) + binary.LittleEndian.PutUint16(b[6:8], e.depth) + return b +} + +// extentChildPtr represents a child pointer in an internal node of extents +// the child could be a leaf node or another internal node. We only would know +// after parsing diskBlock to see its header. 
+type extentChildPtr struct { + fileBlock uint32 // extents or children of this cover from file block fileBlock onwards + count uint32 // how many blocks are covered by this extent + diskBlock uint64 // block number where the children live +} + +// extentLeafNode represents a leaf node of extents +// it includes the information in the header and the extents (leaf nodes). +// By definition, this is a leaf node, so depth=0 +type extentLeafNode struct { + extentNodeHeader + extents extents // the actual extents +} + +// findBlocks find the actual blocks for a range in the file. leaf nodes already have all of the data inside, +// so the FileSystem reference is unused. +func (e extentLeafNode) findBlocks(start, count uint64, _ *FileSystem) ([]uint64, error) { + var ret []uint64 + + // before anything, figure out which file block is the start and end of the desired range + end := start + count - 1 + + // we are at the bottom of the tree, so we can just return the extents + for _, ext := range e.extents { + extentStart := uint64(ext.fileBlock) + extentEnd := uint64(ext.fileBlock + uint32(ext.count) - 1) + + // Check if the extent does not overlap with the given block range + if extentEnd < start || extentStart > end { + continue + } + + // Calculate the overlapping range + overlapStart := max(start, extentStart) + overlapEnd := min(end, extentEnd) + + // Calculate the starting disk block for the overlap + diskBlockStart := ext.startingBlock + (overlapStart - extentStart) + + // Append the corresponding disk blocks to the result + for i := uint64(0); i <= overlapEnd-overlapStart; i++ { + ret = append(ret, diskBlockStart+i) + } + } + return ret, nil +} + +// blocks find the actual blocks for a range in the file. leaf nodes already have all of the data inside, +// so the FileSystem reference is unused. 
+func (e extentLeafNode) blocks(_ *FileSystem) (extents, error) { + return e.extents, nil +} + +// toBytes convert the node to raw bytes to be stored, either in a block or in an inode +func (e extentLeafNode) toBytes() []byte { + // 12 byte header, 12 bytes per child + b := make([]byte, 12+12*e.max) + copy(b[0:12], e.extentNodeHeader.toBytes()) + + for i, ext := range e.extents { + base := (i + 1) * 12 + binary.LittleEndian.PutUint32(b[base:base+4], ext.fileBlock) + binary.LittleEndian.PutUint16(b[base+4:base+6], ext.count) + diskBlock := make([]byte, 8) + binary.LittleEndian.PutUint64(diskBlock, ext.startingBlock) + copy(b[base+6:base+8], diskBlock[4:6]) + copy(b[base+8:base+12], diskBlock[0:4]) + } + return b +} + +func (e *extentLeafNode) getDepth() uint16 { + return e.depth +} + +func (e *extentLeafNode) getMax() uint16 { + return e.max +} + +func (e *extentLeafNode) getBlockSize() uint32 { + return e.blockSize +} + +func (e *extentLeafNode) getFileBlock() uint32 { + return e.extents[0].fileBlock +} + +func (e *extentLeafNode) getCount() uint32 { + return uint32(len(e.extents)) +} + +// extentInternalNode represents an internal node in a tree of extents +// it includes the information in the header and the internal nodes +// By definition, this is an internal node, so depth>0 +type extentInternalNode struct { + extentNodeHeader + children []*extentChildPtr // the children +} + +// findBlocks find the actual blocks for a range in the file. internal nodes need to read the filesystem to +// get the child nodes, so the FileSystem reference is used. +func (e extentInternalNode) findBlocks(start, count uint64, fs *FileSystem) ([]uint64, error) { + var ret []uint64 + + // before anything, figure out which file block is the start and end of the desired range + end := start + count - 1 + + // we are not depth 0, so we have children extent tree nodes. Figure out which ranges we are in. + // the hard part here is that each child has start but not end or count. 
You only know it from reading the next one. + // So if the one we are looking at is in the range, we get it from the children, and keep going + for _, child := range e.children { + extentStart := uint64(child.fileBlock) + extentEnd := uint64(child.fileBlock + child.count - 1) + + // Check if the extent does not overlap with the given block range + if extentEnd < start || extentStart > end { + continue + } + + // read the extent block from the disk + b, err := fs.readBlock(child.diskBlock) + if err != nil { + return nil, err + } + ebf, err := parseExtents(b, e.blockSize, uint32(extentStart), uint32(extentEnd)) + if err != nil { + return nil, err + } + blocks, err := ebf.findBlocks(extentStart, uint64(child.count), fs) + if err != nil { + return nil, err + } + if len(blocks) > 0 { + ret = append(ret, blocks...) + } + } + return ret, nil +} + +// blocks find the actual blocks for a range in the file. leaf nodes already have all of the data inside, +// so the FileSystem reference is unused. +func (e extentInternalNode) blocks(fs *FileSystem) (extents, error) { + var ret extents + + // we are not depth 0, so we have children extent tree nodes. Walk the tree below us and find all of the blocks + for _, child := range e.children { + // read the extent block from the disk + b, err := fs.readBlock(child.diskBlock) + if err != nil { + return nil, err + } + ebf, err := parseExtents(b, e.blockSize, child.fileBlock, child.fileBlock+child.count-1) + if err != nil { + return nil, err + } + blocks, err := ebf.blocks(fs) + if err != nil { + return nil, err + } + if len(blocks) > 0 { + ret = append(ret, blocks...) 
+ } + } + return ret, nil +} + +// toBytes convert the node to raw bytes to be stored, either in a block or in an inode +func (e extentInternalNode) toBytes() []byte { + // 12 byte header, 12 bytes per child + b := make([]byte, 12+12*e.max) + copy(b[0:12], e.extentNodeHeader.toBytes()) + + for i, child := range e.children { + base := (i + 1) * 12 + binary.LittleEndian.PutUint32(b[base:base+4], child.fileBlock) + diskBlock := make([]byte, 8) + binary.LittleEndian.PutUint64(diskBlock, child.diskBlock) + copy(b[base+4:base+8], diskBlock[0:4]) + copy(b[base+8:base+10], diskBlock[4:6]) + } + return b +} +func (e *extentInternalNode) getDepth() uint16 { + return e.depth +} + +func (e *extentInternalNode) getMax() uint16 { + return e.max +} + +func (e *extentInternalNode) getBlockSize() uint32 { + return e.blockSize +} + +func (e *extentInternalNode) getFileBlock() uint32 { + return e.children[0].fileBlock +} + +func (e *extentInternalNode) getCount() uint32 { + return uint32(len(e.children)) +} + +// parseExtents takes bytes, parses them to find the actual extents or the next blocks down. +// It does not recurse down the tree, as we do not want to do that until we actually are ready +// to read those blocks. This is similar to how ext4 driver in the Linux kernel does it. +// totalBlocks is the total number of blocks covered in this given section of the extent tree. 
+func parseExtents(b []byte, blocksize, start, count uint32) (extentBlockFinder, error) { + var ret extentBlockFinder + // must have at least header and one entry + minLength := extentTreeHeaderLength + extentTreeEntryLength + if len(b) < minLength { + return nil, fmt.Errorf("cannot parse extent tree from %d bytes, minimum required %d", len(b), minLength) + } + // check magic signature + if binary.LittleEndian.Uint16(b[0:2]) != extentHeaderSignature { + return nil, fmt.Errorf("invalid extent tree signature: %x", b[0x0:0x2]) + } + e := extentNodeHeader{ + entries: binary.LittleEndian.Uint16(b[0x2:0x4]), + max: binary.LittleEndian.Uint16(b[0x4:0x6]), + depth: binary.LittleEndian.Uint16(b[0x6:0x8]), + blockSize: blocksize, + } + // b[0x8:0xc] is used for the generation by Lustre but not standard ext4, so we ignore + + // we have parsed the header, now read either the leaf entries or the intermediate nodes + switch e.depth { + case 0: + leafNode := extentLeafNode{ + extentNodeHeader: e, + } + // read the leaves + for i := 0; i < int(e.entries); i++ { + start := i*extentTreeEntryLength + extentTreeHeaderLength + diskBlock := make([]byte, 8) + copy(diskBlock[0:4], b[start+8:start+12]) + copy(diskBlock[4:6], b[start+6:start+8]) + leafNode.extents = append(leafNode.extents, extent{ + fileBlock: binary.LittleEndian.Uint32(b[start : start+4]), + count: binary.LittleEndian.Uint16(b[start+4 : start+6]), + startingBlock: binary.LittleEndian.Uint64(diskBlock), + }) + } + ret = &leafNode + default: + internalNode := extentInternalNode{ + extentNodeHeader: e, + } + for i := 0; i < int(e.entries); i++ { + start := i*extentTreeEntryLength + extentTreeHeaderLength + diskBlock := make([]byte, 8) + copy(diskBlock[0:4], b[start+4:start+8]) + copy(diskBlock[4:6], b[start+8:start+10]) + ptr := &extentChildPtr{ + diskBlock: binary.LittleEndian.Uint64(diskBlock), + fileBlock: binary.LittleEndian.Uint32(b[start : start+4]), + } + internalNode.children = append(internalNode.children, ptr) + 
if i > 0 { + internalNode.children[i-1].count = ptr.fileBlock - internalNode.children[i-1].fileBlock + } + } + if len(internalNode.children) > 0 { + internalNode.children[len(internalNode.children)-1].count = start + count - internalNode.children[len(internalNode.children)-1].fileBlock + } + ret = &internalNode + } + + return ret, nil +} + +// extendExtentTree extends extent tree with a slice of new extents +// if the existing tree is nil, create a new one. +// For example, if the input is an extent tree - like the kind found in an inode - and you want to add more extents to it, +// you add the provided extents, and it expands the tree, including creating new internal nodes and writing them to disk, as needed. + +func extendExtentTree(existing extentBlockFinder, added *extents, fs *FileSystem, parent *extentInternalNode) (extentBlockFinder, error) { + // Check if existing is a leaf or internal node + switch node := existing.(type) { + case *extentLeafNode: + return extendLeafNode(node, added, fs, parent) + case *extentInternalNode: + return extendInternalNode(node, added, fs, parent) + case nil: + // brand new extent tree. The root is in the inode, which has a max of 4 extents. + return createRootExtentTree(added, fs) + default: + return nil, fmt.Errorf("unsupported extentBlockFinder type") + } +} + +func createRootExtentTree(added *extents, fs *FileSystem) (extentBlockFinder, error) { + // the root always is in the inode, which has a maximum of 4 extents. If it fits within that, we can just create a leaf node. + if len(*added) <= 4 { + return &extentLeafNode{ + extentNodeHeader: extentNodeHeader{ + depth: 0, + entries: uint16(len(*added)), + max: 4, + blockSize: fs.superblock.blockSize, + }, + extents: *added, + }, nil + } + // in theory, we never should be creating a root internal node. We always should be starting with an extent or two, + // and later expanding the file. + // It might be theoretically possible, though, so we will handle it in the future. 
+ return nil, fmt.Errorf("cannot create root internal node") +} + +func extendLeafNode(node *extentLeafNode, added *extents, fs *FileSystem, parent *extentInternalNode) (extentBlockFinder, error) { + // Check if the leaf node has enough space for the added extents + if len(node.extents)+len(*added) <= int(node.max) { + // Simply append the extents if there's enough space + node.extents = append(node.extents, *added...) + node.entries = uint16(len(node.extents)) + + // Write the updated node back to the disk + err := writeNodeToDisk(node, fs, parent) + if err != nil { + return nil, err + } + + return node, nil + } + + // If not enough space, split the node + newNodes, err := splitLeafNode(node, added, fs, parent) + if err != nil { + return nil, err + } + + // Check if the original node was the root + if parent == nil { + // Create a new internal node to reference the split leaf nodes + var newNodesAsBlockFinder []extentBlockFinder + for _, n := range newNodes { + newNodesAsBlockFinder = append(newNodesAsBlockFinder, n) + } + newRoot := createInternalNode(newNodesAsBlockFinder, nil, fs) + return newRoot, nil + } + + // If the original node was not the root, handle the parent internal node + parentNode, err := getParentNode(node, fs) + if err != nil { + return nil, err + } + + return extendInternalNode(parentNode, added, fs, parent) +} + +func splitLeafNode(node *extentLeafNode, added *extents, fs *FileSystem, parent *extentInternalNode) ([]*extentLeafNode, error) { + // Combine existing and new extents + allExtents := node.extents + allExtents = append(allExtents, *added...) 
+ // Sort extents by fileBlock to maintain order + sort.Slice(allExtents, func(i, j int) bool { + return allExtents[i].fileBlock < allExtents[j].fileBlock + }) + + // Calculate the midpoint to split the extents + mid := len(allExtents) / 2 + + // Create the first new leaf node + firstLeaf := &extentLeafNode{ + extentNodeHeader: extentNodeHeader{ + depth: 0, + entries: uint16(mid), + max: node.max, + blockSize: node.blockSize, + }, + extents: allExtents[:mid], + } + + // Create the second new leaf node + secondLeaf := &extentLeafNode{ + extentNodeHeader: extentNodeHeader{ + depth: 0, + entries: uint16(len(allExtents) - mid), + max: node.max, + blockSize: node.blockSize, + }, + extents: allExtents[mid:], + } + + // Write new leaf nodes to the disk + err := writeNodeToDisk(firstLeaf, fs, parent) + if err != nil { + return nil, err + } + err = writeNodeToDisk(secondLeaf, fs, parent) + if err != nil { + return nil, err + } + + return []*extentLeafNode{firstLeaf, secondLeaf}, nil +} + +func createInternalNode(nodes []extentBlockFinder, parent *extentInternalNode, fs *FileSystem) *extentInternalNode { + internalNode := &extentInternalNode{ + extentNodeHeader: extentNodeHeader{ + depth: nodes[0].getDepth() + 1, // Depth is 1 more than the children + entries: uint16(len(nodes)), + max: nodes[0].getMax(), // Assuming uniform max for all nodes + blockSize: nodes[0].getBlockSize(), + }, + children: make([]*extentChildPtr, len(nodes)), + } + + for i, node := range nodes { + internalNode.children[i] = &extentChildPtr{ + fileBlock: node.getFileBlock(), + count: node.getCount(), + diskBlock: getBlockNumberFromNode(node, parent), + } + } + + // Write the new internal node to the disk + err := writeNodeToDisk(internalNode, fs, parent) + if err != nil { + return nil + } + + return internalNode +} + +func getBlockNumberFromNode(node extentBlockFinder, parent *extentInternalNode) uint64 { + for _, childPtr := range parent.children { + if childPtrMatchesNode(childPtr, node) { + return 
childPtr.diskBlock + } + } + return 0 // Return 0 or an appropriate error value if the block number is not found +} + +// Helper function to match a child pointer to a node +func childPtrMatchesNode(childPtr *extentChildPtr, node extentBlockFinder) bool { + switch n := node.(type) { + case *extentLeafNode: + return childPtr.fileBlock == n.extents[0].fileBlock + case *extentInternalNode: + // Logic to determine if the childPtr matches the internal node + // Placeholder: Implement based on your specific matching criteria + return true + default: + return false + } +} + +func extendInternalNode(node *extentInternalNode, added *extents, fs *FileSystem, parent *extentInternalNode) (extentBlockFinder, error) { + // Find the appropriate child node to extend + childIndex := findChildNode(node, added) + childPtr := node.children[childIndex] + + // Load the actual child node from the disk + childNode, err := loadChildNode(childPtr, fs) + if err != nil { + return nil, err + } + + // Recursively extend the child node + updatedChild, err := extendExtentTree(childNode, added, fs, node) + if err != nil { + return nil, err + } + + // Update the current internal node to reference the updated child + switch updatedChild := updatedChild.(type) { + case *extentLeafNode: + node.children[childIndex] = &extentChildPtr{ + fileBlock: updatedChild.extents[0].fileBlock, + count: uint32(len(updatedChild.extents)), + diskBlock: getBlockNumberFromNode(updatedChild, node), + } + case *extentInternalNode: + node.children[childIndex] = &extentChildPtr{ + fileBlock: updatedChild.children[0].fileBlock, + count: uint32(len(updatedChild.children)), + diskBlock: getBlockNumberFromNode(updatedChild, node), + } + default: + return nil, fmt.Errorf("unsupported updatedChild type") + } + + // Check if the internal node is at capacity + if len(node.children) > int(node.max) { + // Split the internal node if it's at capacity + newInternalNodes, err := splitInternalNode(node, node.children[childIndex], fs, 
parent) + if err != nil { + return nil, err + } + + // Check if the original node was the root + if parent == nil { + // Create a new internal node as the new root + var newNodesAsBlockFinder []extentBlockFinder + for _, n := range newInternalNodes { + newNodesAsBlockFinder = append(newNodesAsBlockFinder, n) + } + newRoot := createInternalNode(newNodesAsBlockFinder, nil, fs) + return newRoot, nil + } + + // If the original node was not the root, handle the parent internal node + return extendInternalNode(parent, added, fs, parent) + } + + // Write the updated node back to the disk + err = writeNodeToDisk(node, fs, parent) + if err != nil { + return nil, err + } + + return node, nil +} + +// Helper function to get the parent node of a given internal node +// +//nolint:revive // this parameter will be used eventually +func getParentNode(node extentBlockFinder, fs *FileSystem) (*extentInternalNode, error) { + // Logic to find and return the parent node of the given node + // This is a placeholder and needs to be implemented based on your specific tree structure + return nil, fmt.Errorf("getParentNode not implemented") +} + +func splitInternalNode(node *extentInternalNode, newChild *extentChildPtr, fs *FileSystem, parent *extentInternalNode) ([]*extentInternalNode, error) { + // Combine existing children with the new child + allChildren := node.children + allChildren = append(allChildren, newChild) + // Sort children by fileBlock to maintain order + sort.Slice(allChildren, func(i, j int) bool { + return allChildren[i].fileBlock < allChildren[j].fileBlock + }) + + // Calculate the midpoint to split the children + mid := len(allChildren) / 2 + + // Create the first new internal node + firstInternal := &extentInternalNode{ + extentNodeHeader: extentNodeHeader{ + depth: node.depth, + entries: uint16(mid), + max: node.max, + blockSize: node.blockSize, + }, + children: allChildren[:mid], + } + + // Create the second new internal node + secondInternal := &extentInternalNode{ 
+ extentNodeHeader: extentNodeHeader{ + depth: node.depth, + entries: uint16(len(allChildren) - mid), + max: node.max, + blockSize: node.blockSize, + }, + children: allChildren[mid:], + } + + // Write new internal nodes to the disk + err := writeNodeToDisk(firstInternal, fs, parent) + if err != nil { + return nil, err + } + err = writeNodeToDisk(secondInternal, fs, parent) + if err != nil { + return nil, err + } + + return []*extentInternalNode{firstInternal, secondInternal}, nil +} + +func writeNodeToDisk(node extentBlockFinder, fs *FileSystem, parent *extentInternalNode) error { + var blockNumber uint64 + if parent != nil { + blockNumber = getBlockNumberFromNode(node, parent) + } else { + blockNumber = getNewBlockNumber(fs) + } + + if blockNumber == 0 { + return fmt.Errorf("block number not found for node") + } + + data := node.toBytes() + _, err := fs.file.WriteAt(data, int64(blockNumber)*int64(fs.superblock.blockSize)) + return err +} + +// Helper function to get a new block number when there is no parent +// +//nolint:revive // this parameter will be used eventually +func getNewBlockNumber(fs *FileSystem) uint64 { + // Logic to allocate a new block + // This is a placeholder and needs to be implemented based on your specific filesystem structure + return 0 // Placeholder: Replace with actual implementation +} + +// Helper function to find the block number of a child node from its parent +func findChildBlockNumber(parent *extentInternalNode, child extentBlockFinder) uint64 { + for _, childPtr := range parent.children { + if childPtrMatchesNode(childPtr, child) { + return childPtr.diskBlock + } + } + return 0 +} + +func findChildNode(node *extentInternalNode, added *extents) int { + // Assuming added extents are sorted, find the correct child node to extend + addedSlice := *added + for i, child := range node.children { + if addedSlice[0].fileBlock < child.fileBlock { + return i - 1 + } + } + return len(node.children) - 1 +} + +// loadChildNode load up a child 
node from the disk +// +//nolint:unparam // this parameter will be used eventually +func loadChildNode(childPtr *extentChildPtr, fs *FileSystem) (extentBlockFinder, error) { + data := make([]byte, fs.superblock.blockSize) + _, err := fs.file.ReadAt(data, int64(childPtr.diskBlock)*int64(fs.superblock.blockSize)) + if err != nil { + return nil, err + } + + // Logic to decode data into an extentBlockFinder (extentLeafNode or extentInternalNode) + // This is a placeholder and needs to be implemented based on your specific encoding scheme + var node extentBlockFinder + // Implement the logic to decode the node from the data + return node, nil +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/features.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/features.go new file mode 100644 index 00000000000..9a8baa9ebbe --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/features.go @@ -0,0 +1,451 @@ +package ext4 + +// features are defined +// beginning at https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/ext2fs/ext2_fs.h#n820 + +// featureFlags is a structure holding which flags are set - compatible, incompatible and read-only compatible +type featureFlags struct { + // compatible, incompatible, and compatibleReadOnly feature flags + directoryPreAllocate bool + imagicInodes bool + hasJournal bool + extendedAttributes bool + reservedGDTBlocksForExpansion bool + directoryIndices bool + lazyBlockGroup bool + excludeInode bool + excludeBitmap bool + sparseSuperBlockV2 bool + fastCommit bool + stableInodes bool + orphanFile bool + compression bool + directoryEntriesRecordFileType bool + recoveryNeeded bool + separateJournalDevice bool + metaBlockGroups bool + extents bool + fs64Bit bool + multipleMountProtection bool + flexBlockGroups bool + extendedAttributeInodes bool + dataInDirectoryEntries bool + metadataChecksumSeedInSuperblock bool + largeDirectory bool + dataInInode bool + encryptInodes bool + sparseSuperblock bool + 
largeFile bool + btreeDirectory bool + hugeFile bool + gdtChecksum bool + largeSubdirectoryCount bool + largeInodes bool + snapshot bool + quota bool + bigalloc bool + metadataChecksums bool + replicas bool + readOnly bool + projectQuotas bool +} + +func parseFeatureFlags(compatFlags, incompatFlags, roCompatFlags uint32) featureFlags { + f := featureFlags{ + directoryPreAllocate: compatFeatureDirectoryPreAllocate.included(compatFlags), + imagicInodes: compatFeatureImagicInodes.included(compatFlags), + hasJournal: compatFeatureHasJournal.included(compatFlags), + extendedAttributes: compatFeatureExtendedAttributes.included(compatFlags), + reservedGDTBlocksForExpansion: compatFeatureReservedGDTBlocksForExpansion.included(compatFlags), + directoryIndices: compatFeatureDirectoryIndices.included(compatFlags), + lazyBlockGroup: compatFeatureLazyBlockGroup.included(compatFlags), + excludeInode: compatFeatureExcludeInode.included(compatFlags), + excludeBitmap: compatFeatureExcludeBitmap.included(compatFlags), + sparseSuperBlockV2: compatFeatureSparseSuperBlockV2.included(compatFlags), + fastCommit: compatFeatureFastCommit.included(compatFlags), + stableInodes: compatFeatureStableInodes.included(compatFlags), + orphanFile: compatFeatureOrphanFile.included(compatFlags), + compression: incompatFeatureCompression.included(incompatFlags), + directoryEntriesRecordFileType: incompatFeatureDirectoryEntriesRecordFileType.included(incompatFlags), + recoveryNeeded: incompatFeatureRecoveryNeeded.included(incompatFlags), + separateJournalDevice: incompatFeatureSeparateJournalDevice.included(incompatFlags), + metaBlockGroups: incompatFeatureMetaBlockGroups.included(incompatFlags), + extents: incompatFeatureExtents.included(incompatFlags), + fs64Bit: incompatFeature64Bit.included(incompatFlags), + multipleMountProtection: incompatFeatureMultipleMountProtection.included(incompatFlags), + flexBlockGroups: incompatFeatureFlexBlockGroups.included(incompatFlags), + extendedAttributeInodes: 
incompatFeatureExtendedAttributeInodes.included(incompatFlags), + dataInDirectoryEntries: incompatFeatureDataInDirectoryEntries.included(incompatFlags), + metadataChecksumSeedInSuperblock: incompatFeatureMetadataChecksumSeedInSuperblock.included(incompatFlags), + largeDirectory: incompatFeatureLargeDirectory.included(incompatFlags), + dataInInode: incompatFeatureDataInInode.included(incompatFlags), + encryptInodes: incompatFeatureEncryptInodes.included(incompatFlags), + sparseSuperblock: roCompatFeatureSparseSuperblock.included(roCompatFlags), + largeFile: roCompatFeatureLargeFile.included(roCompatFlags), + btreeDirectory: roCompatFeatureBtreeDirectory.included(roCompatFlags), + hugeFile: roCompatFeatureHugeFile.included(roCompatFlags), + gdtChecksum: roCompatFeatureGDTChecksum.included(roCompatFlags), + largeSubdirectoryCount: roCompatFeatureLargeSubdirectoryCount.included(roCompatFlags), + largeInodes: roCompatFeatureLargeInodes.included(roCompatFlags), + snapshot: roCompatFeatureSnapshot.included(roCompatFlags), + quota: roCompatFeatureQuota.included(roCompatFlags), + bigalloc: roCompatFeatureBigalloc.included(roCompatFlags), + metadataChecksums: roCompatFeatureMetadataChecksums.included(roCompatFlags), + replicas: roCompatFeatureReplicas.included(roCompatFlags), + readOnly: roCompatFeatureReadOnly.included(roCompatFlags), + projectQuotas: roCompatFeatureProjectQuotas.included(roCompatFlags), + } + + return f +} + +//nolint:gocyclo // we know this has cyclomatic complexity, but not worth breaking apart +func (f *featureFlags) toInts() (compatFlags, incompatFlags, roCompatFlags uint32) { + // compatible flags + if f.directoryPreAllocate { + compatFlags |= uint32(compatFeatureDirectoryPreAllocate) + } + if f.imagicInodes { + compatFlags |= uint32(compatFeatureImagicInodes) + } + if f.hasJournal { + compatFlags |= uint32(compatFeatureHasJournal) + } + if f.extendedAttributes { + compatFlags |= uint32(compatFeatureExtendedAttributes) + } + if 
f.reservedGDTBlocksForExpansion { + compatFlags |= uint32(compatFeatureReservedGDTBlocksForExpansion) + } + if f.directoryIndices { + compatFlags |= uint32(compatFeatureDirectoryIndices) + } + if f.lazyBlockGroup { + compatFlags |= uint32(compatFeatureLazyBlockGroup) + } + if f.excludeInode { + compatFlags |= uint32(compatFeatureExcludeInode) + } + if f.excludeBitmap { + compatFlags |= uint32(compatFeatureExcludeBitmap) + } + if f.sparseSuperBlockV2 { + compatFlags |= uint32(compatFeatureSparseSuperBlockV2) + } + if f.fastCommit { + compatFlags |= uint32(compatFeatureFastCommit) + } + if f.stableInodes { + compatFlags |= uint32(compatFeatureStableInodes) + } + if f.orphanFile { + compatFlags |= uint32(compatFeatureOrphanFile) + } + + // incompatible flags + if f.compression { + incompatFlags |= uint32(incompatFeatureCompression) + } + if f.directoryEntriesRecordFileType { + incompatFlags |= uint32(incompatFeatureDirectoryEntriesRecordFileType) + } + if f.recoveryNeeded { + incompatFlags |= uint32(incompatFeatureRecoveryNeeded) + } + if f.separateJournalDevice { + incompatFlags |= uint32(incompatFeatureSeparateJournalDevice) + } + if f.metaBlockGroups { + incompatFlags |= uint32(incompatFeatureMetaBlockGroups) + } + if f.extents { + incompatFlags |= uint32(incompatFeatureExtents) + } + if f.fs64Bit { + incompatFlags |= uint32(incompatFeature64Bit) + } + if f.multipleMountProtection { + incompatFlags |= uint32(incompatFeatureMultipleMountProtection) + } + if f.flexBlockGroups { + incompatFlags |= uint32(incompatFeatureFlexBlockGroups) + } + if f.extendedAttributeInodes { + incompatFlags |= uint32(incompatFeatureExtendedAttributeInodes) + } + if f.dataInDirectoryEntries { + incompatFlags |= uint32(incompatFeatureDataInDirectoryEntries) + } + if f.metadataChecksumSeedInSuperblock { + incompatFlags |= uint32(incompatFeatureMetadataChecksumSeedInSuperblock) + } + if f.largeDirectory { + incompatFlags |= uint32(incompatFeatureLargeDirectory) + } + if f.dataInInode { + 
incompatFlags |= uint32(incompatFeatureDataInInode) + } + if f.encryptInodes { + incompatFlags |= uint32(incompatFeatureEncryptInodes) + } + + // read only compatible flags + if f.sparseSuperblock { + roCompatFlags |= uint32(roCompatFeatureSparseSuperblock) + } + if f.largeFile { + roCompatFlags |= uint32(roCompatFeatureLargeFile) + } + if f.btreeDirectory { + roCompatFlags |= uint32(roCompatFeatureBtreeDirectory) + } + if f.hugeFile { + roCompatFlags |= uint32(roCompatFeatureHugeFile) + } + if f.gdtChecksum { + roCompatFlags |= uint32(roCompatFeatureGDTChecksum) + } + if f.largeSubdirectoryCount { + roCompatFlags |= uint32(roCompatFeatureLargeSubdirectoryCount) + } + if f.largeInodes { + roCompatFlags |= uint32(roCompatFeatureLargeInodes) + } + if f.snapshot { + roCompatFlags |= uint32(roCompatFeatureSnapshot) + } + if f.quota { + roCompatFlags |= uint32(roCompatFeatureQuota) + } + if f.bigalloc { + roCompatFlags |= uint32(roCompatFeatureBigalloc) + } + if f.metadataChecksums { + roCompatFlags |= uint32(roCompatFeatureMetadataChecksums) + } + if f.replicas { + roCompatFlags |= uint32(roCompatFeatureReplicas) + } + if f.readOnly { + roCompatFlags |= uint32(roCompatFeatureReadOnly) + } + if f.projectQuotas { + roCompatFlags |= uint32(roCompatFeatureProjectQuotas) + } + + return compatFlags, incompatFlags, roCompatFlags +} + +// default features +/* + base_features = sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr + features = has_journal,extent,huge_file,flex_bg,uninit_bg,64bit,dir_nlink,extra_isize +*/ +var defaultFeatureFlags = featureFlags{ + largeFile: true, + hugeFile: true, + sparseSuperblock: true, + flexBlockGroups: true, + hasJournal: true, + extents: true, + fs64Bit: true, + extendedAttributes: true, +} + +type FeatureOpt func(*featureFlags) + +func WithFeatureDirectoryPreAllocate(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.directoryPreAllocate = enable + } +} +func WithFeatureImagicInodes(enable bool) FeatureOpt { + 
return func(o *featureFlags) { + o.imagicInodes = enable + } +} +func WithFeatureHasJournal(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.hasJournal = enable + } +} +func WithFeatureExtendedAttributes(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.extendedAttributes = enable + } +} +func WithFeatureReservedGDTBlocksForExpansion(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.reservedGDTBlocksForExpansion = enable + } +} +func WithFeatureDirectoryIndices(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.directoryIndices = enable + } +} +func WithFeatureLazyBlockGroup(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.lazyBlockGroup = enable + } +} +func WithFeatureExcludeInode(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.excludeInode = enable + } +} +func WithFeatureExcludeBitmap(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.excludeBitmap = enable + } +} +func WithFeatureSparseSuperBlockV2(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.sparseSuperBlockV2 = enable + } +} +func WithFeatureCompression(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.compression = enable + } +} +func WithFeatureDirectoryEntriesRecordFileType(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.directoryEntriesRecordFileType = enable + } +} +func WithFeatureRecoveryNeeded(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.recoveryNeeded = enable + } +} +func WithFeatureSeparateJournalDevice(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.separateJournalDevice = enable + } +} +func WithFeatureMetaBlockGroups(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.metaBlockGroups = enable + } +} +func WithFeatureExtents(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.extents = enable + } +} +func WithFeatureFS64Bit(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.fs64Bit = enable + 
} +} +func WithFeatureMultipleMountProtection(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.multipleMountProtection = enable + } +} +func WithFeatureFlexBlockGroups(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.flexBlockGroups = enable + } +} +func WithFeatureExtendedAttributeInodes(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.extendedAttributeInodes = enable + } +} +func WithFeatureDataInDirectoryEntries(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.dataInDirectoryEntries = enable + } +} +func WithFeatureMetadataChecksumSeedInSuperblock(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.metadataChecksumSeedInSuperblock = enable + } +} +func WithFeatureLargeDirectory(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.largeDirectory = enable + } +} +func WithFeatureDataInInode(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.dataInInode = enable + } +} +func WithFeatureEncryptInodes(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.encryptInodes = enable + } +} +func WithFeatureSparseSuperblock(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.sparseSuperblock = enable + } +} +func WithFeatureLargeFile(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.largeFile = enable + } +} +func WithFeatureBTreeDirectory(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.btreeDirectory = enable + } +} +func WithFeatureHugeFile(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.hugeFile = enable + } +} +func WithFeatureGDTChecksum(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.gdtChecksum = enable + } +} +func WithFeatureLargeSubdirectoryCount(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.largeSubdirectoryCount = enable + } +} +func WithFeatureLargeInodes(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.largeInodes = enable + } +} +func WithFeatureSnapshot(enable 
bool) FeatureOpt { + return func(o *featureFlags) { + o.snapshot = enable + } +} +func WithFeatureQuota(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.quota = enable + } +} +func WithFeatureBigalloc(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.bigalloc = enable + } +} +func WithFeatureMetadataChecksums(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.metadataChecksums = enable + } +} +func WithFeatureReplicas(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.replicas = enable + } +} +func WithFeatureReadOnly(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.readOnly = enable + } +} +func WithFeatureProjectQuotas(enable bool) FeatureOpt { + return func(o *featureFlags) { + o.projectQuotas = enable + } +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/file.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/file.go new file mode 100644 index 00000000000..4dc653956c6 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/file.go @@ -0,0 +1,208 @@ +package ext4 + +import ( + "fmt" + "io" +) + +// File represents a single file in an ext4 filesystem +type File struct { + *directoryEntry + *inode + isReadWrite bool + isAppend bool + offset int64 + filesystem *FileSystem + extents extents +} + +// Read reads up to len(b) bytes from the File. +// It returns the number of bytes read and any error encountered. 
+// At end of file, Read returns 0, io.EOF +// reads from the last known offset in the file from last read or write +// use Seek() to set at a particular point +func (fl *File) Read(b []byte) (int, error) { + var ( + fileSize = int64(fl.size) + blocksize = uint64(fl.filesystem.superblock.blockSize) + ) + if fl.offset >= fileSize { + return 0, io.EOF + } + + // Calculate the number of bytes to read + bytesToRead := int64(len(b)) + if fl.offset+bytesToRead > fileSize { + bytesToRead = fileSize - fl.offset + } + + // Create a buffer to hold the bytes to be read + readBytes := int64(0) + b = b[:bytesToRead] + + // the offset given for reading is relative to the file, so we need to calculate + // where these are in the extents relative to the file + readStartBlock := uint64(fl.offset) / blocksize + for _, e := range fl.extents { + // if the last block of the extent is before the first block we want to read, skip it + if uint64(e.fileBlock)+uint64(e.count) < readStartBlock { + continue + } + // extentSize is the number of bytes on the disk for the extent + extentSize := int64(e.count) * int64(blocksize) + // where do we start and end in the extent? 
+ startPositionInExtent := fl.offset - int64(e.fileBlock)*int64(blocksize) + leftInExtent := extentSize - startPositionInExtent + // how many bytes are left to read + toReadInOffset := bytesToRead - readBytes + if toReadInOffset > leftInExtent { + toReadInOffset = leftInExtent + } + // read those bytes + startPosOnDisk := e.startingBlock*blocksize + uint64(startPositionInExtent) + b2 := make([]byte, toReadInOffset) + read, err := fl.filesystem.file.ReadAt(b2, int64(startPosOnDisk)) + if err != nil { + return int(readBytes), fmt.Errorf("failed to read bytes: %v", err) + } + copy(b[readBytes:], b2[:read]) + readBytes += int64(read) + fl.offset += int64(read) + + if readBytes >= bytesToRead { + break + } + } + var err error + if fl.offset >= fileSize { + err = io.EOF + } + + return int(readBytes), err +} + +// Write writes len(b) bytes to the File. +// It returns the number of bytes written and an error, if any. +// returns a non-nil error when n != len(b) +// writes to the last known offset in the file from last read or write +// use Seek() to set at a particular point +func (fl *File) Write(b []byte) (int, error) { + var ( + fileSize = int64(fl.size) + originalFileSize = int64(fl.size) + blockCount = fl.blocks + originalBlockCount = fl.blocks + blocksize = uint64(fl.filesystem.superblock.blockSize) + ) + if !fl.isReadWrite { + return 0, fmt.Errorf("file is not open for writing") + } + + // if adding these bytes goes past the filesize, update the inode filesize to the new size and write the inode + // if adding these bytes goes past the total number of blocks, add more blocks, update the inode block count and write the inode + // if the offset is greater than the filesize, update the inode filesize to the offset + if fl.offset >= fileSize { + fl.size = uint64(fl.offset) + } + + // Calculate the number of bytes to write + bytesToWrite := int64(len(b)) + + offsetAfterWrite := fl.offset + bytesToWrite + if offsetAfterWrite > int64(fl.size) { + fl.size = uint64(fl.offset 
+ bytesToWrite) + } + + // calculate the number of blocks in the file post-write + newBlockCount := fl.size / blocksize + if fl.size%blocksize > 0 { + newBlockCount++ + } + blocksNeeded := newBlockCount - blockCount + bytesNeeded := blocksNeeded * blocksize + if newBlockCount > blockCount { + newExtents, err := fl.filesystem.allocateExtents(bytesNeeded, &fl.extents) + if err != nil { + return 0, fmt.Errorf("could not allocate disk space for file %w", err) + } + extentTreeParsed, err := extendExtentTree(fl.inode.extents, newExtents, fl.filesystem, nil) + if err != nil { + return 0, fmt.Errorf("could not convert extents into tree: %w", err) + } + fl.inode.extents = extentTreeParsed + fl.blocks = newBlockCount + } + + if originalFileSize != int64(fl.size) || originalBlockCount != fl.blocks { + err := fl.filesystem.writeInode(fl.inode) + if err != nil { + return 0, fmt.Errorf("could not write inode: %w", err) + } + } + + writtenBytes := int64(0) + + // the offset given for reading is relative to the file, so we need to calculate + // where these are in the extents relative to the file + writeStartBlock := uint64(fl.offset) / blocksize + for _, e := range fl.extents { + // if the last block of the extent is before the first block we want to write, skip it + if uint64(e.fileBlock)+uint64(e.count) < writeStartBlock { + continue + } + // extentSize is the number of bytes on the disk for the extent + extentSize := int64(e.count) * int64(blocksize) + // where do we start and end in the extent? + startPositionInExtent := fl.offset - int64(e.fileBlock)*int64(blocksize) + leftInExtent := extentSize - startPositionInExtent + // how many bytes are left in the extent? 
+ toWriteInOffset := bytesToWrite - writtenBytes + if toWriteInOffset > leftInExtent { + toWriteInOffset = leftInExtent + } + // read those bytes + startPosOnDisk := e.startingBlock*blocksize + uint64(startPositionInExtent) + b2 := make([]byte, toWriteInOffset) + copy(b2, b[writtenBytes:]) + written, err := fl.filesystem.file.WriteAt(b2, int64(startPosOnDisk)) + if err != nil { + return int(writtenBytes), fmt.Errorf("failed to read bytes: %v", err) + } + writtenBytes += int64(written) + fl.offset += int64(written) + + if written >= len(b) { + break + } + } + var err error + if fl.offset >= fileSize { + err = io.EOF + } + + return int(writtenBytes), err +} + +// Seek set the offset to a particular point in the file +func (fl *File) Seek(offset int64, whence int) (int64, error) { + newOffset := int64(0) + switch whence { + case io.SeekStart: + newOffset = offset + case io.SeekEnd: + newOffset = int64(fl.size) + offset + case io.SeekCurrent: + newOffset = fl.offset + offset + } + if newOffset < 0 { + return fl.offset, fmt.Errorf("cannot set offset %d before start of file", offset) + } + fl.offset = newOffset + return fl.offset, nil +} + +// Close close a file that is being read +func (fl *File) Close() error { + *fl = File{} + return nil +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/fileinfo.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/fileinfo.go new file mode 100644 index 00000000000..4a6e5c3aacb --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/fileinfo.go @@ -0,0 +1,48 @@ +package ext4 + +import ( + "os" + "time" +) + +// FileInfo represents the information for an individual file +// it fulfills os.FileInfo interface +type FileInfo struct { + modTime time.Time + mode os.FileMode + name string + size int64 + isDir bool +} + +// IsDir abbreviation for Mode().IsDir() +func (fi *FileInfo) IsDir() bool { + return fi.isDir +} + +// ModTime modification time +func (fi *FileInfo) ModTime() time.Time { + return 
fi.modTime +} + +// Mode returns file mode +func (fi *FileInfo) Mode() os.FileMode { + return fi.mode +} + +// Name base name of the file +// +// will return the long name of the file. If none exists, returns the shortname and extension +func (fi *FileInfo) Name() string { + return fi.name +} + +// Size length in bytes for regular files +func (fi *FileInfo) Size() int64 { + return fi.size +} + +// Sys underlying data source - not supported yet and so will return nil +func (fi *FileInfo) Sys() interface{} { + return nil +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/groupdescriptors.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/groupdescriptors.go new file mode 100644 index 00000000000..995cda05e86 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/groupdescriptors.go @@ -0,0 +1,327 @@ +package ext4 + +import ( + "cmp" + "encoding/binary" + "fmt" + "slices" + + "github.com/diskfs/go-diskfs/filesystem/ext4/crc" +) + +type blockGroupFlag uint16 +type gdtChecksumType uint8 + +func (b blockGroupFlag) included(a uint16) bool { + return a&uint16(b) == uint16(b) +} + +//nolint:unused // will be used in the future, not yet +func (g gdtChecksumType) included(a uint8) bool { + return a&uint8(g) == uint8(g) +} + +const ( + groupDescriptorSize uint16 = 32 + groupDescriptorSize64Bit uint16 = 64 + blockGroupFlagInodesUninitialized blockGroupFlag = 0x1 + blockGroupFlagBlockBitmapUninitialized blockGroupFlag = 0x2 + blockGroupFlagInodeTableZeroed blockGroupFlag = 0x4 + gdtChecksumNone gdtChecksumType = 0 + gdtChecksumGdt gdtChecksumType = 1 + gdtChecksumMetadata gdtChecksumType = 2 +) + +type blockGroupFlags struct { + inodesUninitialized bool + blockBitmapUninitialized bool + inodeTableZeroed bool +} + +// groupdescriptors is a structure holding all of the group descriptors for all of the block groups +type groupDescriptors struct { + descriptors []groupDescriptor +} + +// groupDescriptor is a structure holding the data about a 
single block group +type groupDescriptor struct { + blockBitmapLocation uint64 + inodeBitmapLocation uint64 + inodeTableLocation uint64 + freeBlocks uint32 + freeInodes uint32 + usedDirectories uint32 + flags blockGroupFlags + snapshotExclusionBitmapLocation uint64 + blockBitmapChecksum uint32 + inodeBitmapChecksum uint32 + unusedInodes uint32 + size uint16 + number uint16 +} + +func (gd *groupDescriptor) equal(other *groupDescriptor) bool { + if other == nil { + return gd == nil + } + return *gd == *other +} + +func (gds *groupDescriptors) equal(a *groupDescriptors) bool { + if gds == nil && a == nil { + return true + } + if (gds == nil && a != nil) || (a == nil && gds != nil) || len(gds.descriptors) != len(a.descriptors) { + return false + } + + // both not nil, same size, so compare them + for i, g := range gds.descriptors { + if g != a.descriptors[i] { + return false + } + } + // if we made it this far, all the same + return true +} + +// groupDescriptorsFromBytes create a groupDescriptors struct from bytes +func groupDescriptorsFromBytes(b []byte, gdSize uint16, hashSeed uint32, checksumType gdtChecksumType) (*groupDescriptors, error) { + gds := groupDescriptors{} + gdSlice := make([]groupDescriptor, 0, 10) + + count := len(b) / int(gdSize) + + // go through them gdSize bytes at a time + for i := 0; i < count; i++ { + start := i * int(gdSize) + end := start + int(gdSize) + gd, err := groupDescriptorFromBytes(b[start:end], gdSize, i, checksumType, hashSeed) + if err != nil || gd == nil { + return nil, fmt.Errorf("error creating group descriptor from bytes: %w", err) + } + gdSlice = append(gdSlice, *gd) + } + gds.descriptors = gdSlice + + return &gds, nil +} + +// toBytes returns groupDescriptors ready to be written to disk +func (gds *groupDescriptors) toBytes(checksumType gdtChecksumType, hashSeed uint32) []byte { + b := make([]byte, 0, 10*groupDescriptorSize) + for _, gd := range gds.descriptors { + b2 := gd.toBytes(checksumType, hashSeed) + b = append(b, 
b2...) + } + + return b +} + +// byFreeBlocks provides a sorted list of groupDescriptors by free blocks, descending. +// If you want them ascending, sort if. +func (gds *groupDescriptors) byFreeBlocks() []groupDescriptor { + // make a copy of the slice + gdSlice := make([]groupDescriptor, len(gds.descriptors)) + copy(gdSlice, gds.descriptors) + + // sort the slice + slices.SortFunc(gdSlice, func(a, b groupDescriptor) int { + return cmp.Compare(a.freeBlocks, b.freeBlocks) + }) + + return gdSlice +} + +// groupDescriptorFromBytes create a groupDescriptor struct from bytes +func groupDescriptorFromBytes(b []byte, gdSize uint16, number int, checksumType gdtChecksumType, hashSeed uint32) (*groupDescriptor, error) { + // block count, reserved block count and free blocks depends on whether the fs is 64-bit or not + blockBitmapLocation := make([]byte, 8) + inodeBitmapLocation := make([]byte, 8) + inodeTableLocation := make([]byte, 8) + freeBlocks := make([]byte, 4) + freeInodes := make([]byte, 4) + usedirectories := make([]byte, 4) + snapshotExclusionBitmapLocation := make([]byte, 8) + blockBitmapChecksum := make([]byte, 4) + inodeBitmapChecksum := make([]byte, 4) + unusedInodes := make([]byte, 4) + + copy(blockBitmapLocation[0:4], b[0x0:0x4]) + copy(inodeBitmapLocation[0:4], b[0x4:0x8]) + copy(inodeTableLocation[0:4], b[0x8:0xc]) + copy(freeBlocks[0:2], b[0xc:0xe]) + copy(freeInodes[0:2], b[0xe:0x10]) + copy(usedirectories[0:2], b[0x10:0x12]) + copy(snapshotExclusionBitmapLocation[0:4], b[0x14:0x18]) + copy(blockBitmapChecksum[0:2], b[0x18:0x1a]) + copy(inodeBitmapChecksum[0:2], b[0x1a:0x1c]) + copy(unusedInodes[0:2], b[0x1c:0x1e]) + + if gdSize == 64 { + copy(blockBitmapLocation[4:8], b[0x20:0x24]) + copy(inodeBitmapLocation[4:8], b[0x24:0x28]) + copy(inodeTableLocation[4:8], b[0x28:0x2c]) + copy(freeBlocks[2:4], b[0x2c:0x2e]) + copy(freeInodes[2:4], b[0x2e:0x30]) + copy(usedirectories[2:4], b[0x30:0x32]) + copy(unusedInodes[2:4], b[0x32:0x34]) + 
copy(snapshotExclusionBitmapLocation[4:8], b[0x34:0x38]) + copy(blockBitmapChecksum[2:4], b[0x38:0x3a]) + copy(inodeBitmapChecksum[2:4], b[0x3a:0x3c]) + } + + gdNumber := uint16(number) + // only bother with checking the checksum if it was not type none (pre-checksums) + if checksumType != gdtChecksumNone { + checksum := binary.LittleEndian.Uint16(b[0x1e:0x20]) + actualChecksum := groupDescriptorChecksum(b[0x0:0x40], hashSeed, gdNumber, checksumType) + if checksum != actualChecksum { + return nil, fmt.Errorf("checksum mismatch, passed %x, actual %x", checksum, actualChecksum) + } + } + + gd := groupDescriptor{ + size: gdSize, + number: gdNumber, + blockBitmapLocation: binary.LittleEndian.Uint64(blockBitmapLocation), + inodeBitmapLocation: binary.LittleEndian.Uint64(inodeBitmapLocation), + inodeTableLocation: binary.LittleEndian.Uint64(inodeTableLocation), + freeBlocks: binary.LittleEndian.Uint32(freeBlocks), + freeInodes: binary.LittleEndian.Uint32(freeInodes), + usedDirectories: binary.LittleEndian.Uint32(usedirectories), + snapshotExclusionBitmapLocation: binary.LittleEndian.Uint64(snapshotExclusionBitmapLocation), + blockBitmapChecksum: binary.LittleEndian.Uint32(blockBitmapChecksum), + inodeBitmapChecksum: binary.LittleEndian.Uint32(inodeBitmapChecksum), + unusedInodes: binary.LittleEndian.Uint32(unusedInodes), + flags: parseBlockGroupFlags(binary.LittleEndian.Uint16(b[0x12:0x14])), + } + + return &gd, nil +} + +// toBytes returns a groupDescriptor ready to be written to disk +func (gd *groupDescriptor) toBytes(checksumType gdtChecksumType, hashSeed uint32) []byte { + gdSize := gd.size + + b := make([]byte, gdSize) + + blockBitmapLocation := make([]byte, 8) + inodeBitmapLocation := make([]byte, 8) + inodeTableLocation := make([]byte, 8) + freeBlocks := make([]byte, 4) + freeInodes := make([]byte, 4) + usedirectories := make([]byte, 4) + snapshotExclusionBitmapLocation := make([]byte, 8) + blockBitmapChecksum := make([]byte, 4) + inodeBitmapChecksum := 
make([]byte, 4) + unusedInodes := make([]byte, 4) + + binary.LittleEndian.PutUint64(blockBitmapLocation, gd.blockBitmapLocation) + binary.LittleEndian.PutUint64(inodeTableLocation, gd.inodeTableLocation) + binary.LittleEndian.PutUint64(inodeBitmapLocation, gd.inodeBitmapLocation) + binary.LittleEndian.PutUint32(freeBlocks, gd.freeBlocks) + binary.LittleEndian.PutUint32(freeInodes, gd.freeInodes) + binary.LittleEndian.PutUint32(usedirectories, gd.usedDirectories) + binary.LittleEndian.PutUint64(snapshotExclusionBitmapLocation, gd.snapshotExclusionBitmapLocation) + binary.LittleEndian.PutUint32(blockBitmapChecksum, gd.blockBitmapChecksum) + binary.LittleEndian.PutUint32(inodeBitmapChecksum, gd.inodeBitmapChecksum) + binary.LittleEndian.PutUint32(unusedInodes, gd.unusedInodes) + + // copy the lower 32 bytes in + copy(b[0x0:0x4], blockBitmapLocation[0:4]) + copy(b[0x4:0x8], inodeBitmapLocation[0:4]) + copy(b[0x8:0xc], inodeTableLocation[0:4]) + copy(b[0xc:0xe], freeBlocks[0:2]) + copy(b[0xe:0x10], freeInodes[0:2]) + copy(b[0x10:0x12], usedirectories[0:2]) + binary.LittleEndian.PutUint16(b[0x12:0x14], gd.flags.toInt()) + copy(b[0x14:0x18], snapshotExclusionBitmapLocation[0:4]) + copy(b[0x18:0x1a], blockBitmapChecksum[0:2]) + copy(b[0x1a:0x1c], inodeBitmapChecksum[0:2]) + copy(b[0x1c:0x1e], unusedInodes[0:2]) + + // now for the upper 32 bytes + if gd.size == 64 { + copy(b[0x20:0x24], blockBitmapLocation[4:8]) + copy(b[0x24:0x28], inodeBitmapLocation[4:8]) + copy(b[0x28:0x2c], inodeTableLocation[4:8]) + copy(b[0x2c:0x2e], freeBlocks[2:4]) + copy(b[0x2e:0x30], freeInodes[2:4]) + copy(b[0x30:0x32], usedirectories[2:4]) + copy(b[0x32:0x34], unusedInodes[2:4]) + copy(b[0x34:0x38], snapshotExclusionBitmapLocation[4:8]) + copy(b[0x38:0x3a], blockBitmapChecksum[2:4]) + copy(b[0x3a:0x3c], inodeBitmapChecksum[2:4]) + } + + checksum := groupDescriptorChecksum(b[0x0:0x40], hashSeed, gd.number, checksumType) + binary.LittleEndian.PutUint16(b[0x1e:0x20], checksum) + + return b +} + 
+func parseBlockGroupFlags(flags uint16) blockGroupFlags { + f := blockGroupFlags{ + inodeTableZeroed: blockGroupFlagInodeTableZeroed.included(flags), + inodesUninitialized: blockGroupFlagInodesUninitialized.included(flags), + blockBitmapUninitialized: blockGroupFlagBlockBitmapUninitialized.included(flags), + } + + return f +} + +func (f *blockGroupFlags) toInt() uint16 { + var ( + flags uint16 + ) + + // compatible flags + if f.inodeTableZeroed { + flags |= uint16(blockGroupFlagInodeTableZeroed) + } + if f.inodesUninitialized { + flags |= uint16(blockGroupFlagInodesUninitialized) + } + if f.blockBitmapUninitialized { + flags |= uint16(blockGroupFlagBlockBitmapUninitialized) + } + return flags +} + +// groupDescriptorChecksum calculate the checksum for a block group descriptor +// NOTE: we are assuming that the block group number is uint64, but we do not know that to be true +// +// it might be uint32 or uint64, and it might be in BigEndian as opposed to LittleEndian +// just have to start with this and see +// we do know that the maximum number of block groups in 32-bit mode is 2^19, which must be uint32 +// and in 64-bit mode it is 2^51 which must be uint64 +// So we start with uint32 = [4]byte{} for regular mode and [8]byte{} for mod32 +func groupDescriptorChecksum(b []byte, hashSeed uint32, groupNumber uint16, checksumType gdtChecksumType) uint16 { + var checksum uint16 + + numBytes := make([]byte, 4) + binary.LittleEndian.PutUint16(numBytes, groupNumber) + switch checksumType { + case gdtChecksumNone: + checksum = 0 + case gdtChecksumMetadata: + // metadata checksum applies groupNumber to seed, then zeroes out checksum bytes from entire descriptor, then applies descriptor bytes + crcResult := crc.CRC32c(hashSeed, numBytes) + b2 := make([]byte, len(b)) + copy(b2, b) + b2[0x1e] = 0 + b2[0x1f] = 0 + crcResult = crc.CRC32c(crcResult, b2) + checksum = uint16(crcResult & 0xffff) + case gdtChecksumGdt: + hashSeed16 := uint16(hashSeed & 0xffff) + crcResult := 
crc.CRC16(hashSeed16, numBytes) + b2 := make([]byte, len(b)) + copy(b2, b) + b2[0x1e] = 0 + b2[0x1f] = 0 + checksum = crc.CRC16(crcResult, b) + } + return checksum +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/inode.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/inode.go new file mode 100644 index 00000000000..b760c0cf672 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/inode.go @@ -0,0 +1,588 @@ +package ext4 + +import ( + "encoding/binary" + "fmt" + "time" + + "github.com/diskfs/go-diskfs/filesystem/ext4/crc" +) + +type inodeFlag uint32 +type fileType uint16 + +func (i inodeFlag) included(a uint32) bool { + return a&uint32(i) == uint32(i) +} + +const ( + ext2InodeSize uint16 = 128 + // minInodeSize is ext2 + the extra min 32 bytes in ext4 + minInodeExtraSize uint16 = 32 + wantInodeExtraSize uint16 = 128 + minInodeSize uint16 = ext2InodeSize + minInodeExtraSize + extentInodeMaxEntries int = 4 + inodeFlagSecureDeletion inodeFlag = 0x1 + inodeFlagPreserveForUndeletion inodeFlag = 0x2 + inodeFlagCompressed inodeFlag = 0x4 + inodeFlagSynchronous inodeFlag = 0x8 + inodeFlagImmutable inodeFlag = 0x10 + inodeFlagAppendOnly inodeFlag = 0x20 + inodeFlagNoDump inodeFlag = 0x40 + inodeFlagNoAccessTimeUpdate inodeFlag = 0x80 + inodeFlagDirtyCompressed inodeFlag = 0x100 + inodeFlagCompressedClusters inodeFlag = 0x200 + inodeFlagNoCompress inodeFlag = 0x400 + inodeFlagEncryptedInode inodeFlag = 0x800 + inodeFlagHashedDirectoryIndexes inodeFlag = 0x1000 + inodeFlagAFSMagicDirectory inodeFlag = 0x2000 + inodeFlagAlwaysJournal inodeFlag = 0x4000 + inodeFlagNoMergeTail inodeFlag = 0x8000 + inodeFlagSyncDirectoryData inodeFlag = 0x10000 + inodeFlagTopDirectory inodeFlag = 0x20000 + inodeFlagHugeFile inodeFlag = 0x40000 + inodeFlagUsesExtents inodeFlag = 0x80000 + inodeFlagExtendedAttributes inodeFlag = 0x200000 + inodeFlagBlocksPastEOF inodeFlag = 0x400000 + inodeFlagSnapshot inodeFlag = 0x1000000 + inodeFlagDeletingSnapshot 
inodeFlag = 0x4000000 + inodeFlagCompletedSnapshotShrink inodeFlag = 0x8000000 + inodeFlagInlineData inodeFlag = 0x10000000 + inodeFlagInheritProject inodeFlag = 0x20000000 + + fileTypeFifo fileType = 0x1000 + fileTypeCharacterDevice fileType = 0x2000 + fileTypeDirectory fileType = 0x4000 + fileTypeBlockDevice fileType = 0x6000 + fileTypeRegularFile fileType = 0x8000 + fileTypeSymbolicLink fileType = 0xA000 + fileTypeSocket fileType = 0xC000 + + filePermissionsOwnerExecute uint16 = 0x40 + filePermissionsOwnerWrite uint16 = 0x80 + filePermissionsOwnerRead uint16 = 0x100 + filePermissionsGroupExecute uint16 = 0x8 + filePermissionsGroupWrite uint16 = 0x10 + filePermissionsGroupRead uint16 = 0x20 + filePermissionsOtherExecute uint16 = 0x1 + filePermissionsOtherWrite uint16 = 0x2 + filePermissionsOtherRead uint16 = 0x4 +) + +// mountOptions is a structure holding flags for an inode +type inodeFlags struct { + secureDeletion bool + preserveForUndeletion bool + compressed bool + synchronous bool + immutable bool + appendOnly bool + noDump bool + noAccessTimeUpdate bool + dirtyCompressed bool + compressedClusters bool + noCompress bool + encryptedInode bool + hashedDirectoryIndexes bool + AFSMagicDirectory bool + alwaysJournal bool + noMergeTail bool + syncDirectoryData bool + topDirectory bool + hugeFile bool + usesExtents bool + extendedAttributes bool + blocksPastEOF bool + snapshot bool + deletingSnapshot bool + completedSnapshotShrink bool + inlineData bool + inheritProject bool +} + +type filePermissions struct { + read bool + write bool + execute bool +} + +// inode is a structure holding the data about an inode +type inode struct { + number uint32 + permissionsOther filePermissions + permissionsGroup filePermissions + permissionsOwner filePermissions + fileType fileType + owner uint32 + group uint32 + size uint64 + accessTime time.Time + changeTime time.Time + modifyTime time.Time + createTime time.Time + deletionTime uint32 + hardLinks uint16 + blocks uint64 + 
filesystemBlocks bool + flags *inodeFlags + version uint64 + nfsFileVersion uint32 + extendedAttributeBlock uint64 + inodeSize uint16 + project uint32 + extents extentBlockFinder + linkTarget string +} + +//nolint:unused // will be used in the future, not yet +func (i *inode) equal(a *inode) bool { + if (i == nil && a != nil) || (a == nil && i != nil) { + return false + } + if i == nil && a == nil { + return true + } + return *i == *a +} + +// inodeFromBytes create an inode struct from bytes +func inodeFromBytes(b []byte, sb *superblock, number uint32) (*inode, error) { + // safely make sure it is the min size + if len(b) < int(minInodeSize) { + return nil, fmt.Errorf("inode data too short: %d bytes, must be min %d bytes", len(b), minInodeSize) + } + + // checksum before using the data + checksumBytes := make([]byte, 4) + + // checksum before using the data + copy(checksumBytes[0:2], b[0x7c:0x7e]) + copy(checksumBytes[2:4], b[0x82:0x84]) + // zero out checksum fields before calculating the checksum + b[0x7c] = 0 + b[0x7d] = 0 + b[0x82] = 0 + b[0x83] = 0 + + // block count, reserved block count and free blocks depends on whether the fs is 64-bit or not + owner := make([]byte, 4) + fileSize := make([]byte, 8) + group := make([]byte, 4) + accessTime := make([]byte, 8) + changeTime := make([]byte, 8) + modifyTime := make([]byte, 8) + createTime := make([]byte, 8) + version := make([]byte, 8) + extendedAttributeBlock := make([]byte, 8) + + mode := binary.LittleEndian.Uint16(b[0x0:0x2]) + + copy(owner[0:2], b[0x2:0x4]) + copy(owner[2:4], b[0x78:0x7a]) + copy(group[0:2], b[0x18:0x20]) + copy(group[2:4], b[0x7a:0x7c]) + copy(fileSize[0:4], b[0x4:0x8]) + copy(fileSize[4:8], b[0x6c:0x70]) + copy(version[0:4], b[0x24:0x28]) + copy(version[4:8], b[0x98:0x9c]) + copy(extendedAttributeBlock[0:4], b[0x88:0x8c]) + copy(extendedAttributeBlock[4:6], b[0x76:0x78]) + + // get the the times + // the structure is as follows: + // original 32 bits (0:4) are seconds. 
Add (to the left) 2 more bits from the 32 + // the remaining 30 bites are nanoseconds + copy(accessTime[0:4], b[0x8:0xc]) + // take the two bits relevant and add to fifth byte + accessTime[4] = b[0x8c] & 0x3 + copy(changeTime[0:4], b[0xc:0x10]) + changeTime[4] = b[0x84] & 0x3 + copy(modifyTime[0:4], b[0x10:0x14]) + modifyTime[4] = b[0x88] & 0x3 + copy(createTime[0:4], b[0x90:0x94]) + createTime[4] = b[0x94] & 0x3 + + accessTimeSeconds := binary.LittleEndian.Uint64(accessTime) + changeTimeSeconds := binary.LittleEndian.Uint64(changeTime) + modifyTimeSeconds := binary.LittleEndian.Uint64(modifyTime) + createTimeSeconds := binary.LittleEndian.Uint64(createTime) + + // now get the nanoseconds by using the upper 30 bites + accessTimeNanoseconds := binary.LittleEndian.Uint32(b[0x8c:0x90]) >> 2 + changeTimeNanoseconds := binary.LittleEndian.Uint32(b[0x84:0x88]) >> 2 + modifyTimeNanoseconds := binary.LittleEndian.Uint32(b[0x88:0x8c]) >> 2 + createTimeNanoseconds := binary.LittleEndian.Uint32(b[0x94:0x98]) >> 2 + + flagsNum := binary.LittleEndian.Uint32(b[0x20:0x24]) + + flags := parseInodeFlags(flagsNum) + + blocksLow := binary.LittleEndian.Uint32(b[0x1c:0x20]) + blocksHigh := binary.LittleEndian.Uint16(b[0x74:0x76]) + var ( + blocks uint64 + filesystemBlocks bool + ) + + hugeFile := sb.features.hugeFile + switch { + case !hugeFile: + // just 512-byte blocks + blocks = uint64(blocksLow) + filesystemBlocks = false + case hugeFile && !flags.hugeFile: + // larger number of 512-byte blocks + blocks = uint64(blocksHigh)<<32 + uint64(blocksLow) + filesystemBlocks = false + default: + // larger number of filesystem blocks + blocks = uint64(blocksHigh)<<32 + uint64(blocksLow) + filesystemBlocks = true + } + fileType := parseFileType(mode) + fileSizeNum := binary.LittleEndian.Uint64(fileSize) + + extentInfo := make([]byte, 60) + copy(extentInfo, b[0x28:0x64]) + // symlinks might store link target in extentInfo, or might store them elsewhere + var ( + linkTarget string + allExtents 
extentBlockFinder + err error + ) + if fileType == fileTypeSymbolicLink && fileSizeNum < 60 { + linkTarget = string(extentInfo[:fileSizeNum]) + } else { + // parse the extent information in the inode to get the root of the extents tree + // we do not walk the entire tree, to get a slice of blocks for the file. + // If we want to do that, we call the extentBlockFinder.blocks() method + allExtents, err = parseExtents(extentInfo, sb.blockSize, 0, uint32(blocks)) + if err != nil { + return nil, fmt.Errorf("error parsing extent tree: %v", err) + } + } + + i := inode{ + number: number, + permissionsGroup: parseGroupPermissions(mode), + permissionsOwner: parseOwnerPermissions(mode), + permissionsOther: parseOtherPermissions(mode), + fileType: fileType, + owner: binary.LittleEndian.Uint32(owner), + group: binary.LittleEndian.Uint32(group), + size: fileSizeNum, + hardLinks: binary.LittleEndian.Uint16(b[0x1a:0x1c]), + blocks: blocks, + filesystemBlocks: filesystemBlocks, + flags: &flags, + nfsFileVersion: binary.LittleEndian.Uint32(b[0x64:0x68]), + version: binary.LittleEndian.Uint64(version), + inodeSize: binary.LittleEndian.Uint16(b[0x80:0x82]) + minInodeSize, + deletionTime: binary.LittleEndian.Uint32(b[0x14:0x18]), + accessTime: time.Unix(int64(accessTimeSeconds), int64(accessTimeNanoseconds)), + changeTime: time.Unix(int64(changeTimeSeconds), int64(changeTimeNanoseconds)), + modifyTime: time.Unix(int64(modifyTimeSeconds), int64(modifyTimeNanoseconds)), + createTime: time.Unix(int64(createTimeSeconds), int64(createTimeNanoseconds)), + extendedAttributeBlock: binary.LittleEndian.Uint64(extendedAttributeBlock), + project: binary.LittleEndian.Uint32(b[0x9c:0x100]), + extents: allExtents, + linkTarget: linkTarget, + } + checksum := binary.LittleEndian.Uint32(checksumBytes) + actualChecksum := inodeChecksum(b, sb.checksumSeed, number, i.nfsFileVersion) + + if actualChecksum != checksum { + return nil, fmt.Errorf("checksum mismatch, on-disk %x vs calculated %x", checksum, 
actualChecksum) + } + + return &i, nil +} + +// toBytes returns an inode ready to be written to disk +// +//nolint:unused // will be used in the future, not yet +func (i *inode) toBytes(sb *superblock) []byte { + iSize := sb.inodeSize + + b := make([]byte, iSize) + + mode := make([]byte, 2) + owner := make([]byte, 4) + fileSize := make([]byte, 8) + group := make([]byte, 4) + accessTime := make([]byte, 8) + changeTime := make([]byte, 8) + modifyTime := make([]byte, 8) + createTime := make([]byte, 8) + version := make([]byte, 8) + extendedAttributeBlock := make([]byte, 8) + + binary.LittleEndian.PutUint16(mode, i.permissionsGroup.toGroupInt()|i.permissionsOther.toOtherInt()|i.permissionsOwner.toOwnerInt()|uint16(i.fileType)) + binary.LittleEndian.PutUint32(owner, i.owner) + binary.LittleEndian.PutUint32(group, i.group) + binary.LittleEndian.PutUint64(fileSize, i.size) + binary.LittleEndian.PutUint64(version, i.version) + binary.LittleEndian.PutUint64(extendedAttributeBlock, i.extendedAttributeBlock) + + // there is some odd stuff that ext4 does with nanoseconds. We might need this in the future. 
+ // See https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout#Inode_Timestamps + // binary.LittleEndian.PutUint32(accessTime[4:8], (i.accessTimeNanoseconds<<2)&accessTime[4]) + binary.LittleEndian.PutUint64(accessTime, uint64(i.accessTime.Unix())) + binary.LittleEndian.PutUint32(accessTime[4:8], uint32(i.accessTime.Nanosecond())) + binary.LittleEndian.PutUint64(createTime, uint64(i.createTime.Unix())) + binary.LittleEndian.PutUint32(createTime[4:8], uint32(i.createTime.Nanosecond())) + binary.LittleEndian.PutUint64(changeTime, uint64(i.changeTime.Unix())) + binary.LittleEndian.PutUint32(changeTime[4:8], uint32(i.changeTime.Nanosecond())) + binary.LittleEndian.PutUint64(modifyTime, uint64(i.modifyTime.Unix())) + binary.LittleEndian.PutUint32(modifyTime[4:8], uint32(i.modifyTime.Nanosecond())) + + blocks := make([]byte, 8) + binary.LittleEndian.PutUint64(blocks, i.blocks) + + copy(b[0x0:0x2], mode) + copy(b[0x2:0x4], owner[0:2]) + copy(b[0x4:0x8], fileSize[0:4]) + copy(b[0x8:0xc], accessTime[0:4]) + copy(b[0xc:0x10], changeTime[0:4]) + copy(b[0x10:0x14], modifyTime[0:4]) + + binary.LittleEndian.PutUint32(b[0x14:0x18], i.deletionTime) + copy(b[0x18:0x1a], group[0:2]) + binary.LittleEndian.PutUint16(b[0x1a:0x1c], i.hardLinks) + copy(b[0x1c:0x20], blocks[0:4]) + binary.LittleEndian.PutUint32(b[0x20:0x24], i.flags.toInt()) + copy(b[0x24:0x28], version[0:4]) + copy(b[0x28:0x64], i.extents.toBytes()) + binary.LittleEndian.PutUint32(b[0x64:0x68], i.nfsFileVersion) + copy(b[0x68:0x6c], extendedAttributeBlock[0:4]) + copy(b[0x6c:0x70], fileSize[4:8]) + // b[0x70:0x74] is obsolete + copy(b[0x74:0x76], blocks[4:8]) + copy(b[0x76:0x78], extendedAttributeBlock[4:6]) + copy(b[0x78:0x7a], owner[2:4]) + copy(b[0x7a:0x7c], group[2:4]) + // b[0x7c:0x7e] is for checkeum + // b[0x7e:0x80] is unused + binary.LittleEndian.PutUint16(b[0x80:0x82], i.inodeSize-minInodeSize) + // b[0x82:0x84] is for checkeum + copy(b[0x84:0x88], changeTime[4:8]) + copy(b[0x88:0x8c], modifyTime[4:8]) + 
copy(b[0x8c:0x90], accessTime[4:8]) + copy(b[0x90:0x94], createTime[0:4]) + copy(b[0x94:0x98], createTime[4:8]) + + actualChecksum := inodeChecksum(b, sb.checksumSeed, i.number, i.nfsFileVersion) + checksum := make([]byte, 4) + binary.LittleEndian.PutUint32(checksum, actualChecksum) + copy(b[0x7c:0x7e], checksum[0:2]) + copy(b[0x82:0x84], checksum[2:4]) + + return b +} + +func parseOwnerPermissions(mode uint16) filePermissions { + return filePermissions{ + execute: mode&filePermissionsOwnerExecute == filePermissionsOwnerExecute, + write: mode&filePermissionsOwnerWrite == filePermissionsOwnerWrite, + read: mode&filePermissionsOwnerRead == filePermissionsOwnerRead, + } +} +func parseGroupPermissions(mode uint16) filePermissions { + return filePermissions{ + execute: mode&filePermissionsGroupExecute == filePermissionsGroupExecute, + write: mode&filePermissionsGroupWrite == filePermissionsGroupWrite, + read: mode&filePermissionsGroupRead == filePermissionsGroupRead, + } +} +func parseOtherPermissions(mode uint16) filePermissions { + return filePermissions{ + execute: mode&filePermissionsOtherExecute == filePermissionsOtherExecute, + write: mode&filePermissionsOtherWrite == filePermissionsOtherWrite, + read: mode&filePermissionsOtherRead == filePermissionsOtherRead, + } +} + +//nolint:unused // will be used in the future, not yet +func (fp *filePermissions) toOwnerInt() uint16 { + var mode uint16 + if fp.execute { + mode |= filePermissionsOwnerExecute + } + if fp.write { + mode |= filePermissionsOwnerWrite + } + if fp.read { + mode |= filePermissionsOwnerRead + } + return mode +} + +//nolint:unused // will be used in the future, not yet +func (fp *filePermissions) toOtherInt() uint16 { + var mode uint16 + if fp.execute { + mode |= filePermissionsOtherExecute + } + if fp.write { + mode |= filePermissionsOtherWrite + } + if fp.read { + mode |= filePermissionsOtherRead + } + return mode +} + +//nolint:unused // will be used in the future, not yet +func (fp 
*filePermissions) toGroupInt() uint16 { + var mode uint16 + if fp.execute { + mode |= filePermissionsGroupExecute + } + if fp.write { + mode |= filePermissionsGroupWrite + } + if fp.read { + mode |= filePermissionsGroupRead + } + return mode +} + +// parseFileType from the uint16 mode. The mode is built of bottom 12 bits +// being "any of" several permissions, and thus resolved via AND, +// while the top 4 bits are "only one of" several types, and thus resolved via just equal. +func parseFileType(mode uint16) fileType { + return fileType(mode & 0xF000) +} + +func parseInodeFlags(flags uint32) inodeFlags { + return inodeFlags{ + secureDeletion: inodeFlagSecureDeletion.included(flags), + preserveForUndeletion: inodeFlagPreserveForUndeletion.included(flags), + compressed: inodeFlagCompressed.included(flags), + synchronous: inodeFlagSynchronous.included(flags), + immutable: inodeFlagImmutable.included(flags), + appendOnly: inodeFlagAppendOnly.included(flags), + noDump: inodeFlagNoDump.included(flags), + noAccessTimeUpdate: inodeFlagNoAccessTimeUpdate.included(flags), + dirtyCompressed: inodeFlagDirtyCompressed.included(flags), + compressedClusters: inodeFlagCompressedClusters.included(flags), + noCompress: inodeFlagNoCompress.included(flags), + encryptedInode: inodeFlagEncryptedInode.included(flags), + hashedDirectoryIndexes: inodeFlagHashedDirectoryIndexes.included(flags), + AFSMagicDirectory: inodeFlagAFSMagicDirectory.included(flags), + alwaysJournal: inodeFlagAlwaysJournal.included(flags), + noMergeTail: inodeFlagNoMergeTail.included(flags), + syncDirectoryData: inodeFlagSyncDirectoryData.included(flags), + topDirectory: inodeFlagTopDirectory.included(flags), + hugeFile: inodeFlagHugeFile.included(flags), + usesExtents: inodeFlagUsesExtents.included(flags), + extendedAttributes: inodeFlagExtendedAttributes.included(flags), + blocksPastEOF: inodeFlagBlocksPastEOF.included(flags), + snapshot: inodeFlagSnapshot.included(flags), + deletingSnapshot: 
inodeFlagDeletingSnapshot.included(flags), + completedSnapshotShrink: inodeFlagCompletedSnapshotShrink.included(flags), + inlineData: inodeFlagInlineData.included(flags), + inheritProject: inodeFlagInheritProject.included(flags), + } +} + +//nolint:unused // will be used in the future, not yet +func (i *inodeFlags) toInt() uint32 { + var flags uint32 + + if i.secureDeletion { + flags |= uint32(inodeFlagSecureDeletion) + } + if i.preserveForUndeletion { + flags |= uint32(inodeFlagPreserveForUndeletion) + } + if i.compressed { + flags |= uint32(inodeFlagCompressed) + } + if i.synchronous { + flags |= uint32(inodeFlagSynchronous) + } + if i.immutable { + flags |= uint32(inodeFlagImmutable) + } + if i.appendOnly { + flags |= uint32(inodeFlagAppendOnly) + } + if i.noDump { + flags |= uint32(inodeFlagNoDump) + } + if i.noAccessTimeUpdate { + flags |= uint32(inodeFlagNoAccessTimeUpdate) + } + if i.dirtyCompressed { + flags |= uint32(inodeFlagDirtyCompressed) + } + if i.compressedClusters { + flags |= uint32(inodeFlagCompressedClusters) + } + if i.noCompress { + flags |= uint32(inodeFlagNoCompress) + } + if i.encryptedInode { + flags |= uint32(inodeFlagEncryptedInode) + } + if i.hashedDirectoryIndexes { + flags |= uint32(inodeFlagHashedDirectoryIndexes) + } + if i.AFSMagicDirectory { + flags |= uint32(inodeFlagAFSMagicDirectory) + } + if i.alwaysJournal { + flags |= uint32(inodeFlagAlwaysJournal) + } + if i.noMergeTail { + flags |= uint32(inodeFlagNoMergeTail) + } + if i.syncDirectoryData { + flags |= uint32(inodeFlagSyncDirectoryData) + } + if i.topDirectory { + flags |= uint32(inodeFlagTopDirectory) + } + if i.hugeFile { + flags |= uint32(inodeFlagHugeFile) + } + if i.usesExtents { + flags |= uint32(inodeFlagUsesExtents) + } + if i.extendedAttributes { + flags |= uint32(inodeFlagExtendedAttributes) + } + if i.blocksPastEOF { + flags |= uint32(inodeFlagBlocksPastEOF) + } + if i.snapshot { + flags |= uint32(inodeFlagSnapshot) + } + if i.deletingSnapshot { + flags |= 
uint32(inodeFlagDeletingSnapshot) + } + if i.completedSnapshotShrink { + flags |= uint32(inodeFlagCompletedSnapshotShrink) + } + if i.inlineData { + flags |= uint32(inodeFlagInlineData) + } + if i.inheritProject { + flags |= uint32(inodeFlagInheritProject) + } + + return flags +} + +// inodeChecksum calculate the checksum for an inode +func inodeChecksum(b []byte, checksumSeed, inodeNumber, inodeGeneration uint32) uint32 { + numberBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(numberBytes, inodeNumber) + crcResult := crc.CRC32c(checksumSeed, numberBytes) + genBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(genBytes, inodeGeneration) + crcResult = crc.CRC32c(crcResult, genBytes) + checksum := crc.CRC32c(crcResult, b) + return checksum +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_other.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_other.go new file mode 100644 index 00000000000..09a61488e6d --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_other.go @@ -0,0 +1,12 @@ +//go:build !linux && !unix && !darwin && !windows + +package ext4 + +import ( + "fmt" + "runtime" +) + +func journalDevice(devicePath string) (deviceNumber uint32, err error) { + return 0, fmt.Errorf("external journal device unsupported on filesystem %s", runtime.GOOS) +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_shared.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_shared.go new file mode 100644 index 00000000000..00a91da9b85 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_shared.go @@ -0,0 +1,40 @@ +//go:build linux || unix || freebsd || netbsd || openbsd || darwin + +package ext4 + +import ( + "fmt" + "math" + + "golang.org/x/sys/unix" +) + +func journalDevice(devicePath string) (deviceNumber uint32, err error) { + // Use unix.Stat to get file status + var stat unix.Stat_t + err = 
unix.Stat(devicePath, &stat) + if err != nil { + return deviceNumber, err + } + + // Extract major and minor device numbers + //nolint:unconvert,nolintlint // lint stumbles on this, thinks it is an unnecessary conversion, which is true + // on Linux, but not on others. So we will be explicit about this, and add a nolint flag + major := unix.Major(uint64(stat.Rdev)) + //nolint:unconvert,nolintlint // lint stumbles on this, thinks it is an unnecessary conversion, which is true + // on Linux, but not on others. So we will be explicit about this, and add a nolint flag + minor := unix.Minor(uint64(stat.Rdev)) + + // Combine major and minor numbers using unix.Mkdev + // interestingly, this does not 100% align with what I read about linux mkdev works, which would be: + // const minorbits = 20 + // func mkdev(major, minor uint32) uint32 { + // return (((major) << minorbits) | (minor)) + // } + // we leave this here for a future potential fix + journalDeviceNumber64 := unix.Mkdev(major, minor) + if journalDeviceNumber64 > math.MaxUint32 { + return deviceNumber, fmt.Errorf("journal device number %d is too large", journalDeviceNumber64) + } + return uint32(journalDeviceNumber64), nil +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_windows.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_windows.go new file mode 100644 index 00000000000..bf36fb2e38a --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/journaldevice_windows.go @@ -0,0 +1,11 @@ +//go:build windows + +package ext4 + +import ( + "errors" +) + +func journalDevice(devicePath string) (deviceNumber uint32, err error) { + return 0, errors.New("external journal device unsupported on Windows") +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/md4/md4.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/md4/md4.go new file mode 100644 index 00000000000..77df42700b7 --- /dev/null +++ 
b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/md4/md4.go @@ -0,0 +1,73 @@ +package md4 + +// rotateLeft rotates a 32-bit integer to the left +func rotateLeft(x uint32, s uint) uint32 { + return (x << s) | (x >> (32 - s)) +} + +// basic MD4 functions +func f(x, y, z uint32) uint32 { + return z ^ (x & (y ^ z)) +} + +func g(x, y, z uint32) uint32 { + return (x & y) + ((x ^ y) & z) +} + +func h(x, y, z uint32) uint32 { + return x ^ y ^ z +} + +// MD4 constants +const ( + k1 uint32 = 0 + k2 uint32 = 0x5A827999 + k3 uint32 = 0x6ED9EBA1 +) + +// round applies the round function as a macro +func round(f func(uint32, uint32, uint32) uint32, a, b, c, d, x uint32, s uint) uint32 { + return rotateLeft(a+f(b, c, d)+x, s) +} + +// halfMD4Transform basic cut-down MD4 transform. Returns only 32 bits of result. +func HalfMD4Transform(buf [4]uint32, in []uint32) uint32 { + var a, b, c, d = buf[0], buf[1], buf[2], buf[3] + + /* Round 1 */ + a = round(f, a, b, c, d, in[0]+k1, 3) + d = round(f, d, a, b, c, in[1]+k1, 7) + c = round(f, c, d, a, b, in[2]+k1, 11) + b = round(f, b, c, d, a, in[3]+k1, 19) + a = round(f, a, b, c, d, in[4]+k1, 3) + d = round(f, d, a, b, c, in[5]+k1, 7) + c = round(f, c, d, a, b, in[6]+k1, 11) + b = round(f, b, c, d, a, in[7]+k1, 19) + + /* Round 2 */ + a = round(g, a, b, c, d, in[1]+k2, 3) + d = round(g, d, a, b, c, in[3]+k2, 5) + c = round(g, c, d, a, b, in[5]+k2, 9) + b = round(g, b, c, d, a, in[7]+k2, 13) + a = round(g, a, b, c, d, in[0]+k2, 3) + d = round(g, d, a, b, c, in[2]+k2, 5) + c = round(g, c, d, a, b, in[4]+k2, 9) + b = round(g, b, c, d, a, in[6]+k2, 13) + + /* Round 3 */ + a = round(h, a, b, c, d, in[3]+k3, 3) + d = round(h, d, a, b, c, in[7]+k3, 9) + c = round(h, c, d, a, b, in[2]+k3, 11) + b = round(h, b, c, d, a, in[6]+k3, 15) + a = round(h, a, b, c, d, in[1]+k3, 3) + d = round(h, d, a, b, c, in[5]+k3, 9) + c = round(h, c, d, a, b, in[0]+k3, 11) + b = round(h, b, c, d, a, in[4]+k3, 15) + + buf[0] += a + buf[1] += b + buf[2] += c + buf[3] 
+= d + + return buf[1] +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/miscflags.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/miscflags.go new file mode 100644 index 00000000000..d2a22368e38 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/miscflags.go @@ -0,0 +1,34 @@ +package ext4 + +// miscFlags is a structure holding various miscellaneous flags +type miscFlags struct { + signedDirectoryHash bool + unsignedDirectoryHash bool + developmentTest bool +} + +func parseMiscFlags(flags uint32) miscFlags { + m := miscFlags{ + signedDirectoryHash: flagSignedDirectoryHash.included(flags), + unsignedDirectoryHash: flagUnsignedDirectoryHash.included(flags), + developmentTest: flagTestDevCode.included(flags), + } + return m +} + +func (m *miscFlags) toInt() uint32 { + var flags uint32 + + if m.signedDirectoryHash { + flags |= uint32(flagSignedDirectoryHash) + } + if m.unsignedDirectoryHash { + flags |= uint32(flagUnsignedDirectoryHash) + } + if m.developmentTest { + flags |= uint32(flagTestDevCode) + } + return flags +} + +var defaultMiscFlags = miscFlags{} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/mountoptions.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/mountoptions.go new file mode 100644 index 00000000000..a93a21a11d1 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/mountoptions.go @@ -0,0 +1,182 @@ +package ext4 + +const ( + // default mount options + mountPrintDebugInfo mountOption = 0x1 + mountNewFilesGIDContainingDirectory mountOption = 0x2 + mountUserspaceExtendedAttributes mountOption = 0x4 + mountPosixACLs mountOption = 0x8 + mount16BitUIDs mountOption = 0x10 + mountJournalDataAndMetadata mountOption = 0x20 + mountFlushBeforeJournal mountOption = 0x40 + mountUnorderingDataMetadata mountOption = 0x60 + mountDisableWriteFlushes mountOption = 0x100 + mountTrackMetadataBlocks mountOption = 0x200 + mountDiscardDeviceSupport mountOption = 0x400 + 
mountDisableDelayedAllocation mountOption = 0x800 +) + +// mountOptions is a structure holding which default mount options are set +type mountOptions struct { + printDebugInfo bool + newFilesGIDContainingDirectory bool + userspaceExtendedAttributes bool + posixACLs bool + use16BitUIDs bool + journalDataAndMetadata bool + flushBeforeJournal bool + unorderingDataMetadata bool + disableWriteFlushes bool + trackMetadataBlocks bool + discardDeviceSupport bool + disableDelayedAllocation bool +} + +type mountOption uint32 + +func (m mountOption) included(a uint32) bool { + return a&uint32(m) == uint32(m) +} + +type MountOpt func(*mountOptions) + +func WithDefaultMountOptionPrintDebuggingInfo(enable bool) MountOpt { + return func(o *mountOptions) { + o.printDebugInfo = enable + } +} + +func WithDefaultMountOptionGIDFromDirectory(enable bool) MountOpt { + return func(o *mountOptions) { + o.newFilesGIDContainingDirectory = enable + } +} + +func WithDefaultMountOptionUserspaceXattrs(enable bool) MountOpt { + return func(o *mountOptions) { + o.userspaceExtendedAttributes = enable + } +} + +func WithDefaultMountOptionPOSIXACLs(enable bool) MountOpt { + return func(o *mountOptions) { + o.posixACLs = enable + } +} + +func WithDefaultMountOptionUID16Bit(enable bool) MountOpt { + return func(o *mountOptions) { + o.use16BitUIDs = enable + } +} + +func WithDefaultMountOptionJournalModeData(enable bool) MountOpt { + return func(o *mountOptions) { + o.journalDataAndMetadata = enable + } +} + +func WithDefaultMountOptionJournalModeOrdered(enable bool) MountOpt { + return func(o *mountOptions) { + o.flushBeforeJournal = enable + } +} + +func WithDefaultMountOptionJournalModeWriteback(enable bool) MountOpt { + return func(o *mountOptions) { + o.unorderingDataMetadata = enable + } +} + +func WithDefaultMountOptionDisableWriteFlushes(enable bool) MountOpt { + return func(o *mountOptions) { + o.disableWriteFlushes = enable + } +} + +func WithDefaultMountOptionBlockValidity(enable bool) 
MountOpt { + return func(o *mountOptions) { + o.trackMetadataBlocks = enable + } +} + +func WithDefaultMountOptionDiscardSupport(enable bool) MountOpt { + return func(o *mountOptions) { + o.discardDeviceSupport = enable + } +} + +func WithDefaultMountOptionDisableDelayedAllocation(enable bool) MountOpt { + return func(o *mountOptions) { + o.disableDelayedAllocation = enable + } +} + +func defaultMountOptionsFromOpts(opts []MountOpt) *mountOptions { + o := &mountOptions{} + for _, opt := range opts { + opt(o) + } + return o +} + +func parseMountOptions(flags uint32) mountOptions { + m := mountOptions{ + printDebugInfo: mountPrintDebugInfo.included(flags), + newFilesGIDContainingDirectory: mountNewFilesGIDContainingDirectory.included(flags), + userspaceExtendedAttributes: mountUserspaceExtendedAttributes.included(flags), + posixACLs: mountPosixACLs.included(flags), + use16BitUIDs: mount16BitUIDs.included(flags), + journalDataAndMetadata: mountJournalDataAndMetadata.included(flags), + flushBeforeJournal: mountFlushBeforeJournal.included(flags), + unorderingDataMetadata: mountUnorderingDataMetadata.included(flags), + disableWriteFlushes: mountDisableWriteFlushes.included(flags), + trackMetadataBlocks: mountTrackMetadataBlocks.included(flags), + discardDeviceSupport: mountDiscardDeviceSupport.included(flags), + disableDelayedAllocation: mountDisableDelayedAllocation.included(flags), + } + return m +} + +func (m *mountOptions) toInt() uint32 { + var flags uint32 + + if m.printDebugInfo { + flags |= uint32(mountPrintDebugInfo) + } + if m.newFilesGIDContainingDirectory { + flags |= uint32(mountNewFilesGIDContainingDirectory) + } + if m.userspaceExtendedAttributes { + flags |= uint32(mountUserspaceExtendedAttributes) + } + if m.posixACLs { + flags |= uint32(mountPosixACLs) + } + if m.use16BitUIDs { + flags |= uint32(mount16BitUIDs) + } + if m.journalDataAndMetadata { + flags |= uint32(mountJournalDataAndMetadata) + } + if m.flushBeforeJournal { + flags |= 
uint32(mountFlushBeforeJournal) + } + if m.unorderingDataMetadata { + flags |= uint32(mountUnorderingDataMetadata) + } + if m.disableWriteFlushes { + flags |= uint32(mountDisableWriteFlushes) + } + if m.trackMetadataBlocks { + flags |= uint32(mountTrackMetadataBlocks) + } + if m.discardDeviceSupport { + flags |= uint32(mountDiscardDeviceSupport) + } + if m.disableDelayedAllocation { + flags |= uint32(mountDisableDelayedAllocation) + } + + return flags +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/superblock.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/superblock.go new file mode 100644 index 00000000000..fcafda9390b --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/superblock.go @@ -0,0 +1,768 @@ +package ext4 + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "sort" + "time" + + "github.com/diskfs/go-diskfs/filesystem/ext4/crc" + "github.com/diskfs/go-diskfs/util" + "github.com/google/uuid" +) + +type filesystemState uint16 +type errorBehaviour uint16 +type osFlag uint32 +type feature uint32 +type hashAlgorithm byte +type flag uint32 +type encryptionAlgorithm byte + +func (f feature) included(a uint32) bool { + return a&uint32(f) == uint32(f) +} + +//nolint:unused // we know this is unused, but it will be needed in future +func (f flag) equal(a flag) bool { + return f == a +} +func (f flag) included(a uint32) bool { + return a&uint32(f) == uint32(f) +} + +const ( + // superblockSignature is the signature for every superblock + superblockSignature uint16 = 0xef53 + // optional states for the filesystem + fsStateCleanlyUnmounted filesystemState = 0x0001 + fsStateErrors filesystemState = 0x0002 + fsStateOrphansRecovered filesystemState = 0x0004 + // how to handle erorrs + errorsContinue errorBehaviour = 1 + errorsRemountReadOnly errorBehaviour = 2 + errorsPanic errorBehaviour = 3 + // checksum type + checkSumTypeCRC32c byte = 1 + // oses + osLinux osFlag = 0 + osHurd osFlag = 1 + osMasix osFlag = 2 + 
osFreeBSD osFlag = 3 + osLites osFlag = 4 + // compatible, incompatible, and compatibleReadOnly feature flags + compatFeatureDirectoryPreAllocate feature = 0x1 + compatFeatureImagicInodes feature = 0x2 + compatFeatureHasJournal feature = 0x4 + compatFeatureExtendedAttributes feature = 0x8 + compatFeatureReservedGDTBlocksForExpansion feature = 0x10 + compatFeatureDirectoryIndices feature = 0x20 + compatFeatureLazyBlockGroup feature = 0x40 + compatFeatureExcludeInode feature = 0x80 + compatFeatureExcludeBitmap feature = 0x100 + compatFeatureSparseSuperBlockV2 feature = 0x200 + compatFeatureFastCommit feature = 0x400 + compatFeatureStableInodes feature = 0x800 + compatFeatureOrphanFile feature = 0x1000 + incompatFeatureCompression feature = 0x1 + incompatFeatureDirectoryEntriesRecordFileType feature = 0x2 + incompatFeatureRecoveryNeeded feature = 0x4 + incompatFeatureSeparateJournalDevice feature = 0x8 + incompatFeatureMetaBlockGroups feature = 0x10 + incompatFeatureExtents feature = 0x40 + incompatFeature64Bit feature = 0x80 + incompatFeatureMultipleMountProtection feature = 0x100 + incompatFeatureFlexBlockGroups feature = 0x200 + incompatFeatureExtendedAttributeInodes feature = 0x400 + incompatFeatureDataInDirectoryEntries feature = 0x1000 + incompatFeatureMetadataChecksumSeedInSuperblock feature = 0x2000 + incompatFeatureLargeDirectory feature = 0x4000 + incompatFeatureDataInInode feature = 0x8000 + incompatFeatureEncryptInodes feature = 0x10000 + roCompatFeatureSparseSuperblock feature = 0x1 + roCompatFeatureLargeFile feature = 0x2 + roCompatFeatureBtreeDirectory feature = 0x4 + roCompatFeatureHugeFile feature = 0x8 + roCompatFeatureGDTChecksum feature = 0x10 + roCompatFeatureLargeSubdirectoryCount feature = 0x20 + roCompatFeatureLargeInodes feature = 0x40 + roCompatFeatureSnapshot feature = 0x80 + roCompatFeatureQuota feature = 0x100 + roCompatFeatureBigalloc feature = 0x200 + roCompatFeatureMetadataChecksums feature = 0x400 + roCompatFeatureReplicas feature = 
0x800 + roCompatFeatureReadOnly feature = 0x1000 + roCompatFeatureProjectQuotas feature = 0x2000 + // hash algorithms for htree directory entries + hashLegacy hashAlgorithm = 0x0 + hashHalfMD4 hashAlgorithm = 0x1 + hashTea hashAlgorithm = 0x2 + hashLegacyUnsigned hashAlgorithm = 0x3 + hashHalfMD4Unsigned hashAlgorithm = 0x4 + hashTeaUnsigned hashAlgorithm = 0x5 + // miscellaneous flags + flagSignedDirectoryHash flag = 0x0001 + flagUnsignedDirectoryHash flag = 0x0002 + flagTestDevCode flag = 0x0004 + // encryption algorithms + //nolint:unused // we know these are unused, but they will be needed in the future + encryptionAlgorithmInvalid encryptionAlgorithm = 0 + encryptionAlgorithm256AESXTS encryptionAlgorithm = 1 + encryptionAlgorithm256AESGCM encryptionAlgorithm = 2 + encryptionAlgorithm256AESCBC encryptionAlgorithm = 3 +) + +// journalBackup is a backup in the superblock of the journal's inode i_block[] array and size +type journalBackup struct { + iBlocks [15]uint32 + iSize uint64 +} + +// Superblock is a structure holding the ext4 superblock +type superblock struct { + inodeCount uint32 + blockCount uint64 + reservedBlocks uint64 + freeBlocks uint64 + freeInodes uint32 + firstDataBlock uint32 + blockSize uint32 + clusterSize uint64 + blocksPerGroup uint32 + clustersPerGroup uint32 + inodesPerGroup uint32 + mountTime time.Time + writeTime time.Time + mountCount uint16 + mountsToFsck uint16 + filesystemState filesystemState + errorBehaviour errorBehaviour + minorRevision uint16 + lastCheck time.Time + checkInterval uint32 + creatorOS osFlag + revisionLevel uint32 + reservedBlocksDefaultUID uint16 + reservedBlocksDefaultGID uint16 + firstNonReservedInode uint32 + inodeSize uint16 + blockGroup uint16 + features featureFlags + uuid *uuid.UUID + volumeLabel string + lastMountedDirectory string + algorithmUsageBitmap uint32 + preallocationBlocks byte + preallocationDirectoryBlocks byte + reservedGDTBlocks uint16 + journalSuperblockUUID *uuid.UUID + journalInode uint32 
+ journalDeviceNumber uint32 + orphanedInodesStart uint32 + hashTreeSeed []uint32 + hashVersion hashAlgorithm + groupDescriptorSize uint16 + defaultMountOptions mountOptions + firstMetablockGroup uint32 + mkfsTime time.Time + journalBackup *journalBackup + // 64-bit mode features + inodeMinBytes uint16 + inodeReserveBytes uint16 + miscFlags miscFlags + raidStride uint16 + multiMountPreventionInterval uint16 + multiMountProtectionBlock uint64 + raidStripeWidth uint32 + logGroupsPerFlex uint64 + checksumType byte + totalKBWritten uint64 + snapshotInodeNumber uint32 + snapshotID uint32 + snapshotReservedBlocks uint64 + snapshotStartInode uint32 + errorCount uint32 + errorFirstTime time.Time + errorFirstInode uint32 + errorFirstBlock uint64 + errorFirstFunction string + errorFirstLine uint32 + errorLastTime time.Time + errorLastInode uint32 + errorLastLine uint32 + errorLastBlock uint64 + errorLastFunction string + errorFirstCode byte + errorLastCode byte + mountOptions string + userQuotaInode uint32 + groupQuotaInode uint32 + overheadBlocks uint32 + backupSuperblockBlockGroups [2]uint32 + encryptionAlgorithms [4]encryptionAlgorithm + encryptionSalt [16]byte + lostFoundInode uint32 + projectQuotaInode uint32 + checksumSeed uint32 + // encoding + filenameCharsetEncoding uint16 + filenameCharsetEncodingFlags uint16 + // inode for tracking orphaned inodes + orphanedInodeInodeNumber uint32 +} + +func (sb *superblock) equal(o *superblock) bool { + if (sb == nil && o != nil) || (o == nil && sb != nil) { + return false + } + if sb == nil && o == nil { + return true + } + return reflect.DeepEqual(sb, o) +} + +// FSInformationSectorFromBytes create an FSInformationSector struct from bytes +func superblockFromBytes(b []byte) (*superblock, error) { + bLen := len(b) + if bLen != int(SuperblockSize) { + return nil, fmt.Errorf("cannot read superblock from %d bytes instead of expected %d", bLen, SuperblockSize) + } + + // check the magic signature + actualSignature := 
binary.LittleEndian.Uint16(b[0x38:0x3a]) + if actualSignature != superblockSignature { + return nil, fmt.Errorf("erroneous signature at location 0x38 was %x instead of expected %x", actualSignature, superblockSignature) + } + + sb := superblock{} + + // first read feature flags of various types + compatFlags := binary.LittleEndian.Uint32(b[0x5c:0x60]) + incompatFlags := binary.LittleEndian.Uint32(b[0x60:0x64]) + roCompatFlags := binary.LittleEndian.Uint32(b[0x64:0x68]) + // track which ones are set + sb.features = parseFeatureFlags(compatFlags, incompatFlags, roCompatFlags) + + sb.inodeCount = binary.LittleEndian.Uint32(b[0:4]) + + // block count, reserved block count and free blocks depends on whether the fs is 64-bit or not + blockCount := make([]byte, 8) + reservedBlocks := make([]byte, 8) + freeBlocks := make([]byte, 8) + + copy(blockCount[0:4], b[0x4:0x8]) + copy(reservedBlocks[0:4], b[0x8:0xc]) + copy(freeBlocks[0:4], b[0xc:0x10]) + + if sb.features.fs64Bit { + copy(blockCount[4:8], b[0x150:0x154]) + copy(reservedBlocks[4:8], b[0x154:0x158]) + copy(freeBlocks[4:8], b[0x158:0x15c]) + } + sb.blockCount = binary.LittleEndian.Uint64(blockCount) + sb.reservedBlocks = binary.LittleEndian.Uint64(reservedBlocks) + sb.freeBlocks = binary.LittleEndian.Uint64(freeBlocks) + + sb.freeInodes = binary.LittleEndian.Uint32(b[0x10:0x14]) + sb.firstDataBlock = binary.LittleEndian.Uint32(b[0x14:0x18]) + sb.blockSize = uint32(math.Exp2(float64(10 + binary.LittleEndian.Uint32(b[0x18:0x1c])))) + sb.clusterSize = uint64(math.Exp2(float64(binary.LittleEndian.Uint32(b[0x1c:0x20])))) + sb.blocksPerGroup = binary.LittleEndian.Uint32(b[0x20:0x24]) + if sb.features.bigalloc { + sb.clustersPerGroup = binary.LittleEndian.Uint32(b[0x24:0x28]) + } + sb.inodesPerGroup = binary.LittleEndian.Uint32(b[0x28:0x2c]) + // these higher bits are listed as reserved in https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout + // but looking at the source to mke2fs, we see that some are used, see + // 
https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/ext2fs/ext2_fs.h#n653 + // + // mount time has low 32 bits at 0x2c and high 8 bits at 0x274 + // write time has low 32 bits at 0x30 and high 8 bits at 0x275 + // mkfs time has low 32 bits at 0x108 and high 8 bits at 0x276 + // lastcheck time has low 32 bits at 0x40 and high 8 bits at 0x277 + // firsterror time has low 32 bits at 0x198 and high 8 bits at 0x278 + // lasterror time has low 32 bits at 0x1cc and high 8 bits at 0x279 + // firsterror code is 8 bits at 0x27a + // lasterror code is 8 bits at 0x27b + sb.mountTime = bytesToTime(b[0x2c:0x30], b[0x274:0x275]) + sb.writeTime = bytesToTime(b[0x30:0x34], b[0x275:0x276]) + sb.mkfsTime = bytesToTime(b[0x108:0x10c], b[0x276:0x277]) + sb.lastCheck = bytesToTime(b[0x40:0x44], b[0x277:0x278]) + sb.errorFirstTime = bytesToTime(b[0x198:0x19c], b[0x278:0x279]) + sb.errorLastTime = bytesToTime(b[0x1cc:0x1d0], b[0x279:0x280]) + + sb.errorFirstCode = b[0x27a] + sb.errorLastCode = b[0x27b] + + sb.mountCount = binary.LittleEndian.Uint16(b[0x34:0x36]) + sb.mountsToFsck = binary.LittleEndian.Uint16(b[0x36:0x38]) + + sb.filesystemState = filesystemState(binary.LittleEndian.Uint16(b[0x3a:0x3c])) + sb.errorBehaviour = errorBehaviour(binary.LittleEndian.Uint16(b[0x3c:0x3e])) + + sb.minorRevision = binary.LittleEndian.Uint16(b[0x3e:0x40]) + sb.checkInterval = binary.LittleEndian.Uint32(b[0x44:0x48]) + + sb.creatorOS = osFlag(binary.LittleEndian.Uint32(b[0x48:0x4c])) + sb.revisionLevel = binary.LittleEndian.Uint32(b[0x4c:0x50]) + sb.reservedBlocksDefaultUID = binary.LittleEndian.Uint16(b[0x50:0x52]) + sb.reservedBlocksDefaultGID = binary.LittleEndian.Uint16(b[0x52:0x54]) + + sb.firstNonReservedInode = binary.LittleEndian.Uint32(b[0x54:0x58]) + sb.inodeSize = binary.LittleEndian.Uint16(b[0x58:0x5a]) + sb.blockGroup = binary.LittleEndian.Uint16(b[0x5a:0x5c]) + + voluuid, err := uuid.FromBytes(b[0x68:0x78]) + if err != nil { + return nil, fmt.Errorf("unable to read volume UUID: 
%v", err) + } + sb.uuid = &voluuid + sb.volumeLabel = minString(b[0x78:0x88]) + sb.lastMountedDirectory = minString(b[0x88:0xc8]) + sb.algorithmUsageBitmap = binary.LittleEndian.Uint32(b[0xc8:0xcc]) + + sb.preallocationBlocks = b[0xcc] + sb.preallocationDirectoryBlocks = b[0xcd] + sb.reservedGDTBlocks = binary.LittleEndian.Uint16(b[0xce:0xd0]) + + journaluuid, err := uuid.FromBytes(b[0xd0:0xe0]) + if err != nil { + return nil, fmt.Errorf("unable to read journal UUID: %v", err) + } + sb.journalSuperblockUUID = &journaluuid + sb.journalInode = binary.LittleEndian.Uint32(b[0xe0:0xe4]) + sb.journalDeviceNumber = binary.LittleEndian.Uint32(b[0xe4:0xe8]) + sb.orphanedInodesStart = binary.LittleEndian.Uint32(b[0xe8:0xec]) + + htreeSeed := make([]uint32, 0, 4) + htreeSeed = append(htreeSeed, + binary.LittleEndian.Uint32(b[0xec:0xf0]), + binary.LittleEndian.Uint32(b[0xf0:0xf4]), + binary.LittleEndian.Uint32(b[0xf4:0xf8]), + binary.LittleEndian.Uint32(b[0xf8:0xfc]), + ) + sb.hashTreeSeed = htreeSeed + + sb.hashVersion = hashAlgorithm(b[0xfc]) + + sb.groupDescriptorSize = binary.LittleEndian.Uint16(b[0xfe:0x100]) + + sb.defaultMountOptions = parseMountOptions(binary.LittleEndian.Uint32(b[0x100:0x104])) + sb.firstMetablockGroup = binary.LittleEndian.Uint32(b[0x104:0x108]) + + journalBackupType := b[0xfd] + if journalBackupType == 0 || journalBackupType == 1 { + journalBackupArray := [15]uint32{} + startJournalBackup := 0x10c + for i := 0; i < 15; i++ { + start := startJournalBackup + 4*i + end := startJournalBackup + 4*i + 4 + journalBackupArray[i] = binary.LittleEndian.Uint32(b[start:end]) + } + iSizeBytes := make([]byte, 8) + + copy(iSizeBytes[0:4], b[startJournalBackup+4*16:startJournalBackup+4*17]) + copy(iSizeBytes[4:8], b[startJournalBackup+4*15:startJournalBackup+4*16]) + + sb.journalBackup = &journalBackup{ + iSize: binary.LittleEndian.Uint64(iSizeBytes), + iBlocks: journalBackupArray, + } + } + + sb.inodeMinBytes = binary.LittleEndian.Uint16(b[0x15c:0x15e]) + 
sb.inodeReserveBytes = binary.LittleEndian.Uint16(b[0x15e:0x160]) + sb.miscFlags = parseMiscFlags(binary.LittleEndian.Uint32(b[0x160:0x164])) + + sb.raidStride = binary.LittleEndian.Uint16(b[0x164:0x166]) + sb.raidStripeWidth = binary.LittleEndian.Uint32(b[0x170:0x174]) + + sb.multiMountPreventionInterval = binary.LittleEndian.Uint16(b[0x166:0x168]) + sb.multiMountProtectionBlock = binary.LittleEndian.Uint64(b[0x168:0x170]) + + sb.logGroupsPerFlex = uint64(math.Exp2(float64(b[0x174]))) + + sb.checksumType = b[0x175] // only valid one is 1 + if sb.checksumType != checkSumTypeCRC32c { + return nil, fmt.Errorf("cannot read superblock: invalid checksum type %d, only valid is %d", sb.checksumType, checkSumTypeCRC32c) + } + + // b[0x176:0x178] are reserved padding + + sb.totalKBWritten = binary.LittleEndian.Uint64(b[0x178:0x180]) + + sb.snapshotInodeNumber = binary.LittleEndian.Uint32(b[0x180:0x184]) + sb.snapshotID = binary.LittleEndian.Uint32(b[0x184:0x188]) + sb.snapshotReservedBlocks = binary.LittleEndian.Uint64(b[0x188:0x190]) + sb.snapshotStartInode = binary.LittleEndian.Uint32(b[0x190:0x194]) + + // errors + sb.errorCount = binary.LittleEndian.Uint32(b[0x194:0x198]) + sb.errorFirstInode = binary.LittleEndian.Uint32(b[0x19c:0x1a0]) + sb.errorFirstBlock = binary.LittleEndian.Uint64(b[0x1a0:0x1a8]) + sb.errorFirstFunction = minString(b[0x1a8:0x1c8]) + sb.errorFirstLine = binary.LittleEndian.Uint32(b[0x1c8:0x1cc]) + sb.errorLastInode = binary.LittleEndian.Uint32(b[0x1d0:0x1d4]) + sb.errorLastLine = binary.LittleEndian.Uint32(b[0x1d4:0x1d8]) + sb.errorLastBlock = binary.LittleEndian.Uint64(b[0x1d8:0x1e0]) + sb.errorLastFunction = minString(b[0x1e0:0x200]) + + sb.mountOptions = minString(b[0x200:0x240]) + sb.userQuotaInode = binary.LittleEndian.Uint32(b[0x240:0x244]) + sb.groupQuotaInode = binary.LittleEndian.Uint32(b[0x244:0x248]) + // overheadBlocks *always* is 0 + sb.overheadBlocks = binary.LittleEndian.Uint32(b[0x248:0x24c]) + sb.backupSuperblockBlockGroups = 
[2]uint32{ + binary.LittleEndian.Uint32(b[0x24c:0x250]), + binary.LittleEndian.Uint32(b[0x250:0x254]), + } + for i := 0; i < 4; i++ { + sb.encryptionAlgorithms[i] = encryptionAlgorithm(b[0x254+i]) + } + for i := 0; i < 16; i++ { + sb.encryptionSalt[i] = b[0x258+i] + } + sb.lostFoundInode = binary.LittleEndian.Uint32(b[0x268:0x26c]) + sb.projectQuotaInode = binary.LittleEndian.Uint32(b[0x26c:0x270]) + + sb.checksumSeed = binary.LittleEndian.Uint32(b[0x270:0x274]) + // what if the seed is missing? It can be. + if sb.features.metadataChecksums && sb.checksumSeed == 0 { + sb.checksumSeed = crc.CRC32c(0xffffffff, sb.uuid[:]) + } + + sb.filenameCharsetEncoding = binary.LittleEndian.Uint16(b[0x27c:0x27e]) + sb.filenameCharsetEncodingFlags = binary.LittleEndian.Uint16(b[0x27e:0x280]) + sb.orphanedInodeInodeNumber = binary.LittleEndian.Uint32(b[0x280:0x284]) + + // b[0x288:0x3fc] are reserved for zero padding + + // checksum + checksum := binary.LittleEndian.Uint32(b[0x3fc:0x400]) + + // calculate the checksum and validate - we use crc32c + if sb.features.metadataChecksums { + actualChecksum := crc.CRC32c(0xffffffff, b[0:0x3fc]) + if actualChecksum != checksum { + return nil, fmt.Errorf("invalid superblock checksum, actual was %x, on disk was %x, inverted on disk was %x", actualChecksum, checksum, 0xffffffff-checksum) + } + } + + return &sb, nil +} + +// toBytes returns a superblock ready to be written to disk +func (sb *superblock) toBytes() ([]byte, error) { + b := make([]byte, SuperblockSize) + + binary.LittleEndian.PutUint16(b[0x38:0x3a], superblockSignature) + compatFlags, incompatFlags, roCompatFlags := sb.features.toInts() + binary.LittleEndian.PutUint32(b[0x5c:0x60], compatFlags) + binary.LittleEndian.PutUint32(b[0x60:0x64], incompatFlags) + binary.LittleEndian.PutUint32(b[0x64:0x68], roCompatFlags) + + binary.LittleEndian.PutUint32(b[0:4], sb.inodeCount) + + // block count, reserved block count and free blocks depends on whether the fs is 64-bit or not + blockCount 
:= make([]byte, 8) + reservedBlocks := make([]byte, 8) + freeBlocks := make([]byte, 8) + + binary.LittleEndian.PutUint64(blockCount, sb.blockCount) + binary.LittleEndian.PutUint64(reservedBlocks, sb.reservedBlocks) + binary.LittleEndian.PutUint64(freeBlocks, sb.freeBlocks) + + copy(b[0x4:0x8], blockCount[0:4]) + copy(b[0x8:0xc], reservedBlocks[0:4]) + copy(b[0xc:0x10], freeBlocks[0:4]) + + if sb.features.fs64Bit { + copy(b[0x150:0x154], blockCount[4:8]) + copy(b[0x154:0x158], reservedBlocks[4:8]) + copy(b[0x158:0x15c], freeBlocks[4:8]) + } + + binary.LittleEndian.PutUint32(b[0x10:0x14], sb.freeInodes) + binary.LittleEndian.PutUint32(b[0x14:0x18], sb.firstDataBlock) + binary.LittleEndian.PutUint32(b[0x18:0x1c], uint32(math.Log2(float64(sb.blockSize))-10)) + binary.LittleEndian.PutUint32(b[0x1c:0x20], uint32(math.Log2(float64(sb.clusterSize)))) + + binary.LittleEndian.PutUint32(b[0x20:0x24], sb.blocksPerGroup) + if sb.features.bigalloc { + binary.LittleEndian.PutUint32(b[0x24:0x28], sb.clustersPerGroup) + } else { + binary.LittleEndian.PutUint32(b[0x24:0x28], sb.blocksPerGroup) + } + binary.LittleEndian.PutUint32(b[0x28:0x2c], sb.inodesPerGroup) + mountTime := timeToBytes(sb.mountTime) + writeTime := timeToBytes(sb.writeTime) + mkfsTime := timeToBytes(sb.mkfsTime) + lastCheck := timeToBytes(sb.lastCheck) + errorFirstTime := timeToBytes(sb.errorFirstTime) + errorLastTime := timeToBytes(sb.errorLastTime) + + // mount time low bits, high bit + copy(b[0x2c:0x30], mountTime[0:4]) + b[0x274] = mountTime[4] + // write time low bits, high bit + copy(b[0x30:0x34], writeTime[0:4]) + b[0x275] = writeTime[4] + // mkfs time low bits, high bit + copy(b[0x108:0x10c], mkfsTime[0:4]) + b[0x276] = mkfsTime[4] + // last check time low bits, high bit + copy(b[0x40:0x44], lastCheck[0:4]) + b[0x277] = lastCheck[4] + // first error time low bits, high bit + copy(b[0x198:0x19c], errorFirstTime[0:4]) + b[0x278] = errorFirstTime[4] + // last error time low bits, high bit + 
copy(b[0x1cc:0x1d0], errorLastTime[0:4]) + b[0x279] = errorLastTime[4] + + // error codes + b[0x27a] = sb.errorFirstCode + b[0x27b] = sb.errorLastCode + + binary.LittleEndian.PutUint16(b[0x34:0x36], sb.mountCount) + binary.LittleEndian.PutUint16(b[0x36:0x38], sb.mountsToFsck) + + binary.LittleEndian.PutUint16(b[0x3a:0x3c], uint16(sb.filesystemState)) + binary.LittleEndian.PutUint16(b[0x3c:0x3e], uint16(sb.errorBehaviour)) + + binary.LittleEndian.PutUint16(b[0x3e:0x40], sb.minorRevision) + binary.LittleEndian.PutUint32(b[0x40:0x44], uint32(sb.lastCheck.Unix())) + binary.LittleEndian.PutUint32(b[0x44:0x48], sb.checkInterval) + + binary.LittleEndian.PutUint32(b[0x48:0x4c], uint32(sb.creatorOS)) + binary.LittleEndian.PutUint32(b[0x4c:0x50], sb.revisionLevel) + binary.LittleEndian.PutUint16(b[0x50:0x52], sb.reservedBlocksDefaultUID) + binary.LittleEndian.PutUint16(b[0x52:0x54], sb.reservedBlocksDefaultGID) + + binary.LittleEndian.PutUint32(b[0x54:0x58], sb.firstNonReservedInode) + binary.LittleEndian.PutUint16(b[0x58:0x5a], sb.inodeSize) + binary.LittleEndian.PutUint16(b[0x5a:0x5c], sb.blockGroup) + + if sb.uuid != nil { + copy(b[0x68:0x78], sb.uuid[:]) + } + + ab, err := stringToASCIIBytes(sb.volumeLabel, 16) + if err != nil { + return nil, fmt.Errorf("error converting volume label to bytes: %v", err) + } + copy(b[0x78:0x88], ab[0:16]) + ab, err = stringToASCIIBytes(sb.lastMountedDirectory, 64) + if err != nil { + return nil, fmt.Errorf("error last mounted directory to bytes: %v", err) + } + copy(b[0x88:0xc8], ab[0:64]) + + binary.LittleEndian.PutUint32(b[0xc8:0xcc], sb.algorithmUsageBitmap) + + b[0xcc] = sb.preallocationBlocks + b[0xcd] = sb.preallocationDirectoryBlocks + binary.LittleEndian.PutUint16(b[0xce:0xd0], sb.reservedGDTBlocks) + + if sb.journalSuperblockUUID != nil { + copy(b[0xd0:0xe0], sb.journalSuperblockUUID[:]) + } + + binary.LittleEndian.PutUint32(b[0xe0:0xe4], sb.journalInode) + binary.LittleEndian.PutUint32(b[0xe4:0xe8], sb.journalDeviceNumber) + 
binary.LittleEndian.PutUint32(b[0xe8:0xec], sb.orphanedInodesStart) + + // to be safe + if len(sb.hashTreeSeed) < 4 { + sb.hashTreeSeed = append(sb.hashTreeSeed, 0, 0, 0, 0) + } + binary.LittleEndian.PutUint32(b[0xec:0xf0], sb.hashTreeSeed[0]) + binary.LittleEndian.PutUint32(b[0xf0:0xf4], sb.hashTreeSeed[1]) + binary.LittleEndian.PutUint32(b[0xf4:0xf8], sb.hashTreeSeed[2]) + binary.LittleEndian.PutUint32(b[0xf8:0xfc], sb.hashTreeSeed[3]) + + b[0xfc] = byte(sb.hashVersion) + + binary.LittleEndian.PutUint16(b[0xfe:0x100], sb.groupDescriptorSize) + + binary.LittleEndian.PutUint32(b[0x100:0x104], sb.defaultMountOptions.toInt()) + binary.LittleEndian.PutUint32(b[0x104:0x108], sb.firstMetablockGroup) + + if sb.journalBackup != nil { + b[0xfd] = 1 + startJournalBackup := 0x10c + for i := 0; i < 15; i++ { + start := startJournalBackup + 4*i + end := startJournalBackup + 4*i + 4 + binary.LittleEndian.PutUint32(b[start:end], sb.journalBackup.iBlocks[i]) + } + + iSizeBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(iSizeBytes, sb.journalBackup.iSize) + copy(b[startJournalBackup+4*16:startJournalBackup+4*17], iSizeBytes[0:4]) + copy(b[startJournalBackup+4*15:startJournalBackup+4*16], iSizeBytes[4:8]) + } + + binary.LittleEndian.PutUint16(b[0x15c:0x15e], sb.inodeMinBytes) + binary.LittleEndian.PutUint16(b[0x15e:0x160], sb.inodeReserveBytes) + binary.LittleEndian.PutUint32(b[0x160:0x164], sb.miscFlags.toInt()) + + binary.LittleEndian.PutUint16(b[0x164:0x166], sb.raidStride) + binary.LittleEndian.PutUint32(b[0x170:0x174], sb.raidStripeWidth) + + binary.LittleEndian.PutUint16(b[0x166:0x168], sb.multiMountPreventionInterval) + binary.LittleEndian.PutUint64(b[0x168:0x170], sb.multiMountProtectionBlock) + + b[0x174] = uint8(math.Log2(float64(sb.logGroupsPerFlex))) + + b[0x175] = sb.checksumType // only valid one is 1 + + // b[0x176:0x178] are reserved padding + + binary.LittleEndian.PutUint64(b[0x178:0x180], sb.totalKBWritten) + + binary.LittleEndian.PutUint32(b[0x180:0x184], 
sb.snapshotInodeNumber) + binary.LittleEndian.PutUint32(b[0x184:0x188], sb.snapshotID) + binary.LittleEndian.PutUint64(b[0x188:0x190], sb.snapshotReservedBlocks) + binary.LittleEndian.PutUint32(b[0x190:0x194], sb.snapshotStartInode) + + // errors + binary.LittleEndian.PutUint32(b[0x194:0x198], sb.errorCount) + binary.LittleEndian.PutUint32(b[0x19c:0x1a0], sb.errorFirstInode) + binary.LittleEndian.PutUint64(b[0x1a0:0x1a8], sb.errorFirstBlock) + errorFirstFunctionBytes, err := stringToASCIIBytes(sb.errorFirstFunction, 32) + if err != nil { + return nil, fmt.Errorf("error converting errorFirstFunction to bytes: %v", err) + } + copy(b[0x1a8:0x1c8], errorFirstFunctionBytes) + binary.LittleEndian.PutUint32(b[0x1c8:0x1cc], sb.errorFirstLine) + binary.LittleEndian.PutUint32(b[0x1d0:0x1d4], sb.errorLastInode) + binary.LittleEndian.PutUint32(b[0x1d4:0x1d8], sb.errorLastLine) + binary.LittleEndian.PutUint64(b[0x1d8:0x1e0], sb.errorLastBlock) + errorLastFunctionBytes, err := stringToASCIIBytes(sb.errorLastFunction, 32) + if err != nil { + return nil, fmt.Errorf("error converting errorLastFunction to bytes: %v", err) + } + copy(b[0x1e0:0x200], errorLastFunctionBytes) + + mountOptionsBytes, err := stringToASCIIBytes(sb.mountOptions, 64) + if err != nil { + return nil, fmt.Errorf("error converting mountOptions to bytes: %v", err) + } + copy(b[0x200:0x240], mountOptionsBytes) + binary.LittleEndian.PutUint32(b[0x240:0x244], sb.userQuotaInode) + binary.LittleEndian.PutUint32(b[0x244:0x248], sb.groupQuotaInode) + // overheadBlocks *always* is 0 + binary.LittleEndian.PutUint32(b[0x248:0x24c], sb.overheadBlocks) + binary.LittleEndian.PutUint32(b[0x24c:0x250], sb.backupSuperblockBlockGroups[0]) + binary.LittleEndian.PutUint32(b[0x250:0x254], sb.backupSuperblockBlockGroups[1]) + // safety check of encryption algorithms + + for i := 0; i < 4; i++ { + b[0x254+i] = byte(sb.encryptionAlgorithms[i]) + } + for i := 0; i < 16; i++ { + b[0x258+i] = sb.encryptionSalt[i] + } + 
binary.LittleEndian.PutUint32(b[0x268:0x26c], sb.lostFoundInode) + binary.LittleEndian.PutUint32(b[0x26c:0x270], sb.projectQuotaInode) + + binary.LittleEndian.PutUint32(b[0x270:0x274], sb.checksumSeed) + + binary.LittleEndian.PutUint16(b[0x27c:0x27e], sb.filenameCharsetEncoding) + binary.LittleEndian.PutUint16(b[0x27e:0x280], sb.filenameCharsetEncodingFlags) + binary.LittleEndian.PutUint32(b[0x280:0x284], sb.orphanedInodeInodeNumber) + + // b[0x288:0x3fc] are reserved for zero padding + + // calculate the checksum and validate - we use crc32c + if sb.features.metadataChecksums { + actualChecksum := crc.CRC32c(0xffffffff, b[0:0x3fc]) + binary.LittleEndian.PutUint32(b[0x3fc:0x400], actualChecksum) + } + + return b, nil +} + +func (sb *superblock) gdtChecksumType() gdtChecksumType { + var gdtChecksumTypeInFS gdtChecksumType + switch { + case sb.features.metadataChecksums: + gdtChecksumTypeInFS = gdtChecksumMetadata + case sb.features.gdtChecksum: + gdtChecksumTypeInFS = gdtChecksumGdt + default: + gdtChecksumTypeInFS = gdtChecksumNone + } + return gdtChecksumTypeInFS +} + +func (sb *superblock) blockGroupCount() uint64 { + whole := sb.blockCount / uint64(sb.blocksPerGroup) + part := sb.blockCount % uint64(sb.blocksPerGroup) + if part > 0 { + whole++ + } + return whole +} + +// calculateBackupSuperblocks calculate which block groups should have backup superblocks. 
+func calculateBackupSuperblockGroups(bgs int64) []int64 { + // calculate which block groups should have backup superblocks + // these are if the block group number is a power of 3, 5, or 7 + var backupGroups []int64 + for i := float64(0); ; i++ { + bg := int64(math.Pow(3, i)) + if bg >= bgs { + break + } + backupGroups = append(backupGroups, bg) + } + for i := float64(0); ; i++ { + bg := int64(math.Pow(5, i)) + if bg >= bgs { + break + } + backupGroups = append(backupGroups, bg) + } + for i := float64(0); ; i++ { + bg := int64(math.Pow(7, i)) + if bg >= bgs { + break + } + backupGroups = append(backupGroups, bg) + } + // sort the backup groups + uniqBackupGroups := util.Uniqify[int64](backupGroups) + sort.Slice(uniqBackupGroups, func(i, j int) bool { + return uniqBackupGroups[i] < uniqBackupGroups[j] + }) + return uniqBackupGroups +} + +func bytesToTime(b ...[]byte) time.Time { + // ensure it is at least 8 bytes + var ( + in [8]byte + count int + ) + for _, v := range b { + toCopy := len(v) + if toCopy+count > len(in) { + toCopy = len(in) - count + } + copied := copy(in[count:], v[:toCopy]) + count += copied + } + return time.Unix(int64(binary.LittleEndian.Uint64(in[:])), 0).UTC() +} + +// timeToBytes convert a time.Time to an 8 byte slice. 
Guarantees 8 bytes +func timeToBytes(t time.Time) []byte { + timestamp := t.Unix() + var b = make([]byte, 8) + binary.LittleEndian.PutUint64(b, uint64(timestamp)) + return b +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/util.go b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/util.go new file mode 100644 index 00000000000..ae229430264 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/ext4/util.go @@ -0,0 +1,106 @@ +package ext4 + +import ( + "fmt" + "strings" +) + +const ( + // KB represents one KB + KB int64 = 1024 + // MB represents one MB + MB int64 = 1024 * KB + // GB represents one GB + GB int64 = 1024 * MB + // TB represents one TB + TB int64 = 1024 * GB + // PB represents one TB + PB int64 = 1024 * TB + // XB represents one Exabyte + XB int64 = 1024 * PB + // these because they are larger than int64 or uint64 can handle + // ZB represents one Zettabyte + // ZB int64 = 1024 * XB + // YB represents one Yottabyte + // YB int64 = 1024 * ZB + // Ext4MaxSize is maximum size of an ext4 filesystem in bytes + // it varies based on the block size and if we are 64-bit or 32-bit mode, but the absolute complete max + // is 64KB per block (128 sectors) in 64-bit mode + // for a max filesystem size of 1YB (yottabyte) + // Ext4MaxSize int64 = YB + // if we ever actually care, we will use math/big to do it + // var xb, ZB, kb, YB big.Int + // kb.SetUint64(1024) + // xb.SetUint64(uint64(XB)) + // ZB.Mul(&xb, &kb) + // YB.Mul(&ZB, &kb) + + // Ext4MinSize is minimum size for an ext4 filesystem + // it assumes a single block group with: + // blocksize = 2 sectors = 1KB + // 1 block for boot code + // 1 block for superblock + // 1 block for block group descriptors + // 1 block for bock and inode bitmaps and inode table + // 1 block for data + // total = 5 blocks + Ext4MinSize int64 = 5 * int64(SectorSize512) + + // volume +) + +func splitPath(p string) []string { + // we need to split such that each one ends in "/", except possibly the 
last one + parts := strings.Split(p, "/") + // eliminate empty parts + ret := make([]string, 0) + for _, sub := range parts { + if sub != "" { + ret = append(ret, sub) + } + } + return ret +} + +// convert a string to a byte array, if all characters are valid ascii +// always pads to the full length provided in padding. If size is less than the length of the string, it will be truncated +func stringToASCIIBytes(s string, size int) ([]byte, error) { + length := len(s) + b := make([]byte, length) + // convert the name into 11 bytes + r := []rune(s) + // take the first 8 characters + for i := 0; i < length; i++ { + val := int(r[i]) + // we only can handle values less than max byte = 255 + if val > 255 { + return nil, fmt.Errorf("Non-ASCII character in name: %s", s) + } + b[i] = byte(val) + } + if len(b) < size { + // pad with nulls + for i := len(b); i < size; i++ { + b = append(b, 0) + } + } + if len(b) > size { + b = b[:size] + } + return b, nil +} + +// minString convert []byte to string, but drop extraneous 0x0 +func minString(b []byte) string { + // find the last byte that is not 0x0 + if len(b) == 0 { + return "" + } + index := len(b) - 1 + for ; index >= 0; index-- { + if b[index] != 0 { + break + } + } + return string(b[:index+1]) +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/fat32/fat32.go b/vendor/github.com/diskfs/go-diskfs/filesystem/fat32/fat32.go index 84961587205..ae6531734f8 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/fat32/fat32.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/fat32/fat32.go @@ -375,7 +375,7 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) { _, _ = file.ReadAt(b, int64(fatSecondaryStart)+start) fat2 := tableFromBytes(b) if !fat.equal(fat2) { - return nil, errors.New("fat tables did not much") + return nil, errors.New("fat tables did not match") } dataStart := uint32(fatSecondaryStart) + fat.size @@ -493,9 +493,12 @@ func (fs *FileSystem) ReadDir(p string) 
([]os.FileInfo, error) { } // once we have made it here, looping is done. We have found the final entry // we need to return all of the file info - count := len(entries) - ret := make([]os.FileInfo, count) - for i, e := range entries { + //nolint:prealloc // because the following loop may omit some entry + var ret []os.FileInfo + for _, e := range entries { + if e.isVolumeLabel { + continue + } shortName := e.filenameShort if e.lowercaseShortname { shortName = strings.ToLower(shortName) @@ -507,13 +510,13 @@ func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) { if fileExtension != "" { shortName = fmt.Sprintf("%s.%s", shortName, fileExtension) } - ret[i] = FileInfo{ + ret = append(ret, FileInfo{ modTime: e.modifyTime, name: e.filenameLong, shortName: shortName, size: int64(e.fileSize), isDir: e.isSubdirectory, - } + }) } return ret, nil } @@ -850,6 +853,7 @@ func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, []*di if err != nil { return nil, nil, fmt.Errorf("failed to create subdirectory %s", "/"+strings.Join(paths[0:i+1], "/")) } + currentDir.modifyTime = subdirEntry.createTime // make a basic entry for the new subdir parentDirectoryCluster := currentDir.clusterLocation if parentDirectoryCluster == 2 { @@ -859,8 +863,22 @@ func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, []*di dir := &Directory{ directoryEntry: directoryEntry{clusterLocation: subdirEntry.clusterLocation}, entries: []*directoryEntry{ - {filenameShort: ".", isSubdirectory: true, clusterLocation: subdirEntry.clusterLocation}, - {filenameShort: "..", isSubdirectory: true, clusterLocation: parentDirectoryCluster}, + { + filenameShort: ".", + isSubdirectory: true, + clusterLocation: subdirEntry.clusterLocation, + createTime: subdirEntry.createTime, + modifyTime: subdirEntry.modifyTime, + accessTime: subdirEntry.accessTime, + }, + { + filenameShort: "..", + isSubdirectory: true, + clusterLocation: parentDirectoryCluster, + createTime: 
currentDir.createTime, + modifyTime: currentDir.modifyTime, + accessTime: currentDir.accessTime, + }, }, } // write the new directory entries to disk diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/filesystem.go b/vendor/github.com/diskfs/go-diskfs/filesystem/filesystem.go index fdd35313d01..2c1acfa6c36 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/filesystem.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/filesystem.go @@ -34,4 +34,6 @@ const ( TypeISO9660 // TypeSquashfs is a squashfs filesystem TypeSquashfs + // TypeExt4 is an ext4 compatible filesystem + TypeExt4 ) diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentry.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentry.go index 1f6947811b1..d9877673041 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentry.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentry.go @@ -119,7 +119,7 @@ func (de *directoryEntry) toBytes(skipExt bool, ceBlocks []uint32) ([][]byte, er filenameBytes = []byte{0x01} default: // first validate the filename - err = validateFilename(de.filename, de.isSubdirectory) + err = validateFilename(de.filename, de.isSubdirectory, de.filesystem.suspEnabled) if err != nil { nametype := "filename" if de.isSubdirectory { @@ -189,9 +189,8 @@ func dirEntryExtensionsToBytes(extensions []directoryEntrySystemUseExtension, ma } b = append(b, ce.Bytes()...) break - } else { - b = append(b, b2...) } + b = append(b, b2...) 
} ret = append(ret, b) if len(continuedBytes) > 0 { @@ -482,9 +481,24 @@ func (de *directoryEntry) Size() int64 { // Mode() FileMode // file mode bits func (de *directoryEntry) Mode() os.FileMode { + for _, ext := range de.extensions { + if s, ok := ext.(rockRidgeSymlink); ok && !s.continued { + return 0o755 | os.ModeSymlink + } + } return 0o755 } +// Readlink tries to return the target link, only valid for symlinks +func (de *directoryEntry) ReadLink() (string, bool) { + for _, ext := range de.extensions { + if s, ok := ext.(rockRidgeSymlink); ok && !s.continued { + return s.name, true + } + } + return "", false +} + // ModTime() time.Time // modification time func (de *directoryEntry) ModTime() time.Time { return de.creation @@ -534,16 +548,27 @@ func timeToBytes(t time.Time) []byte { } // convert a string to ascii bytes, but only accept valid d-characters -func validateFilename(s string, isDir bool) error { +func validateFilename(s string, isDir, suspExtension bool) error { + var err error + if suspExtension { + err = validateSUSPFilename(s, isDir) + } else { + err = validateISOFilename(s, isDir) + } + return err +} + +// validateISOFilename validates a filename that is plain ISO9660-compliant (levels 2 & 3) +func validateISOFilename(s string, isDir bool) error { var err error + // all allowed up to 30 characters, of A-Z,0-9,_ if isDir { - // directory only allowed up to 8 characters of A-Z,0-9,_ re := regexp.MustCompile("^[A-Z0-9_]{1,30}$") if !re.MatchString(s) { err = fmt.Errorf("directory name must be of up to 30 characters from A-Z0-9_") } } else { - // filename only allowed up to 8 characters of A-Z,0-9,_, plus an optional '.' plus up to 3 characters of A-Z,0-9,_, plus must have ";1" + // filename also allowed an optional '.' 
plus up to 3 characters of A-Z,0-9,_, plus must have ";1" re := regexp.MustCompile("^[A-Z0-9_]+(.[A-Z0-9_]*)?;1$") switch { case !re.MatchString(s): @@ -555,6 +580,20 @@ func validateFilename(s string, isDir bool) error { return err } +// validateSUSPFilename validates a filename that is Rock Ridge compliant +func validateSUSPFilename(s string, _ bool) error { + var err error + // all allowed up to 255 characters of any kind, except null (0x0) and '/' + re := regexp.MustCompile(`^[^\x00/]*$`) + switch { + case len(s) > 255: + err = fmt.Errorf("filename must be at most 255 characters") + case !re.MatchString(s): + err = fmt.Errorf("filename must not include / or null characters") + } + return err +} + // convert a string to a byte array, if all characters are valid ascii func stringToASCIIBytes(s string) ([]byte, error) { length := len(s) diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentrysystemuseextension.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentrysystemuseextension.go index 1d10ec54eef..8bef9f6c27d 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentrysystemuseextension.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/directoryentrysystemuseextension.go @@ -37,7 +37,7 @@ type suspExtension interface { Descriptor() string Source() string Version() uint8 - GetFileExtensions(string, bool, bool) ([]directoryEntrySystemUseExtension, error) + GetFileExtensions(*finalizeFileInfo, bool, bool) ([]directoryEntrySystemUseExtension, error) GetFinalizeExtensions(*finalizeFileInfo) ([]directoryEntrySystemUseExtension, error) Relocatable() bool Relocate(map[string]*finalizeFileInfo) ([]*finalizeFileInfo, map[string]*finalizeFileInfo, error) diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/eltorito.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/eltorito.go index a2cb818be66..a598ef6695f 100644 --- 
a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/eltorito.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/eltorito.go @@ -33,7 +33,7 @@ const ( elToritoDefaultCatalogRR = "boot.catalog" ) -// Emulation what emulation should be used for booting, normally none +// Emulation that should be used for booting, normally none type Emulation uint8 const ( @@ -55,7 +55,7 @@ type ElTorito struct { BootCatalog string // HideBootCatalog if the boot catalog should be hidden in the file system. Defaults to false HideBootCatalog bool - // Entries list of ElToritoEntry boot entires + // Entries list of ElToritoEntry boot entries Entries []*ElToritoEntry // Platform supported platform Platform Platform @@ -72,11 +72,11 @@ type ElToritoEntry struct { // option `-boot-info-table`. Unlike genisoimage, does not modify the file in the // filesystem, but inserts it on the fly. BootTable bool - // SystemType type of system the partition is, accordinng to the MBR standard + // SystemType type of system the partition is, according to the MBR standard SystemType mbr.Type // LoadSize how many blocks of BootFile to load, equivalent to genisoimage option `-boot-load-size` LoadSize uint16 - size uint16 + size uint32 location uint32 } @@ -127,7 +127,7 @@ func (e *ElToritoEntry) headerBytes(last bool, entries uint16) []byte { func (e *ElToritoEntry) entryBytes() []byte { blocks := e.LoadSize if blocks == 0 { - blocks = e.size / 512 + blocks = uint16(e.size / 512) if e.size%512 > 1 { blocks++ } @@ -153,7 +153,7 @@ func (e *ElToritoEntry) generateBootTable(pvdSector uint32, p string) ([]byte, e b := make([]byte, 56) binary.LittleEndian.PutUint32(b[0:4], pvdSector) binary.LittleEndian.PutUint32(b[4:8], e.location) - binary.LittleEndian.PutUint32(b[8:12], uint32(e.size)) + binary.LittleEndian.PutUint32(b[8:12], e.size) // Checksum - simply add up all 32-bit words beginning at byte position 64 f, err := os.Open(p) if err != nil { diff --git 
a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/file.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/file.go index 7d6bb519357..83dff8b7966 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/file.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/file.go @@ -64,7 +64,7 @@ func (fl *File) Read(b []byte) (int, error) { // Write writes len(b) bytes to the File. // // you cannot write to an iso, so this returns an error -func (fl *File) Write(p []byte) (int, error) { +func (fl *File) Write(_ []byte) (int, error) { return 0, fmt.Errorf("cannot write to a read-only iso filesystem") } diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/finalize.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/finalize.go index c055cf4d350..aaa32629b9f 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/finalize.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/finalize.go @@ -3,6 +3,7 @@ package iso9660 import ( "fmt" "io" + "io/fs" "os" "path" "path/filepath" @@ -12,11 +13,13 @@ import ( "time" "github.com/diskfs/go-diskfs/util" + "github.com/djherbis/times" ) const ( dataStartSector = 16 defaultVolumeIdentifier = "ISOIMAGE" + elToritoBootTableOffset = 8 ) // FinalizeOptions options to pass to finalize @@ -41,6 +44,14 @@ type FinalizeOptions struct { // IsDir() bool // abbreviation for Mode().IsDir() // Sys() interface{} // underlying data source (can return nil) // +// Also supports: +// +// AccessTime() time.Time +// ChangeTime() time.Time +// Nlink() uint32 // number of hardlinks, if supported +// Uid() uint32 // uid, if supported +// Gid() uint32 // gid, if supported +// //nolint:structcheck // keep unused members so that we can know their references type finalizeFileInfo struct { path string @@ -56,6 +67,8 @@ type finalizeFileInfo struct { size int64 mode os.FileMode modTime time.Time + accessTime time.Time + changeTime time.Time isDir bool isRoot bool bytes [][]byte @@ -64,7 +77,55 
@@ type finalizeFileInfo struct { trueParent *finalizeFileInfo trueChild *finalizeFileInfo elToritoEntry *ElToritoEntry - content []byte + linkTarget string + uid uint32 + gid uint32 + nlink uint32 + // content in memory content of file. If this is anything other than nil, including a zero-length slice, + // then this content is used, rather than anything on disk. + content []byte + serial uint64 +} + +func finalizeFileInfoFromFile(p, fullPath string, fi fs.FileInfo) (*finalizeFileInfo, error) { + isRoot := p == "." + name := fi.Name() + shortname, _ := calculateShortnameExtension(name) + + if isRoot { + name = string([]byte{0x00}) + shortname = name + } + t, err := times.Lstat(fullPath) + if err != nil { + return nil, fmt.Errorf("could not get times information for %s: %w", fullPath, err) + } + mode := fi.Mode() + var target string + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + target, err = os.Readlink(fullPath) + if err != nil { + return nil, fmt.Errorf("unable to read link for %s: %w", fullPath, err) + } + } + nlink, uid, gid := statt(fi) + + return &finalizeFileInfo{ + path: p, + name: name, + isDir: fi.IsDir(), + isRoot: isRoot, + modTime: fi.ModTime(), + accessTime: t.AccessTime(), + changeTime: t.ChangeTime(), + mode: mode, + size: fi.Size(), + shortname: shortname, + linkTarget: target, + uid: uid, + gid: gid, + nlink: nlink, + }, nil } func (fi *finalizeFileInfo) Name() string { @@ -100,8 +161,26 @@ func (fi *finalizeFileInfo) updateDepth(depth int) { } } } +func (fi *finalizeFileInfo) AccessTime() time.Time { + return fi.accessTime +} +func (fi *finalizeFileInfo) ChangeTime() time.Time { + return fi.changeTime +} +func (fi *finalizeFileInfo) LinkTarget() string { + return fi.linkTarget +} +func (fi *finalizeFileInfo) Nlink() uint32 { + return fi.nlink +} +func (fi *finalizeFileInfo) UID() uint32 { + return fi.uid +} +func (fi *finalizeFileInfo) GID() uint32 { + return fi.gid +} -func (fi *finalizeFileInfo) toDirectoryEntry(fs *FileSystem, isSelf, 
isParent bool) (*directoryEntry, error) { +func (fi *finalizeFileInfo) toDirectoryEntry(fsm *FileSystem, isSelf, isParent bool) (*directoryEntry, error) { de := &directoryEntry{ extAttrSize: 0, location: fi.location, @@ -116,18 +195,22 @@ func (fi *finalizeFileInfo) toDirectoryEntry(fs *FileSystem, isSelf, isParent bo isSelf: isSelf, isParent: isParent, volumeSequence: 1, - filesystem: fs, + filesystem: fsm, // we keep the full filename until after processing filename: fi.Name(), } // if it is root, and we have susp enabled, add the necessary entries - if fs.suspEnabled { + if fsm.suspEnabled { if fi.isRoot && isSelf { de.extensions = append(de.extensions, directoryEntrySystemUseExtensionSharingProtocolIndicator{skipBytes: 0}) } // add appropriate PX, TF, SL, NM extensions - for _, e := range fs.suspExtensions { - ext, err := e.GetFileExtensions(path.Join(fs.workspace, fi.path), isSelf, isParent) + for _, e := range fsm.suspExtensions { + var ( + ext []directoryEntrySystemUseExtension + err error + ) + ext, err = e.GetFileExtensions(fi, isSelf, isParent) if err != nil { return nil, fmt.Errorf("error getting extensions for %s at path %s: %v", e.ID(), fi.path, err) } @@ -140,14 +223,14 @@ func (fi *finalizeFileInfo) toDirectoryEntry(fs *FileSystem, isSelf, isParent bo } if fi.isRoot && isSelf { - for _, e := range fs.suspExtensions { + for _, e := range fsm.suspExtensions { de.extensions = append(de.extensions, directoryEntrySystemUseExtensionReference{id: e.ID(), descriptor: e.Descriptor(), source: e.Source(), extensionVersion: e.Version()}) } } } return de, nil } -func (fi *finalizeFileInfo) toDirectory(fs *FileSystem) (*Directory, error) { +func (fi *finalizeFileInfo) toDirectory(fsm *FileSystem) (*Directory, error) { // also need to add self and parent to it var ( self, parent, dirEntry *directoryEntry @@ -156,7 +239,7 @@ func (fi *finalizeFileInfo) toDirectory(fs *FileSystem) (*Directory, error) { if !fi.IsDir() { return nil, fmt.Errorf("cannot convert a file 
entry to a directtory") } - self, err = fi.toDirectoryEntry(fs, true, false) + self, err = fi.toDirectoryEntry(fsm, true, false) if err != nil { return nil, fmt.Errorf("could not convert self entry %s to dirEntry: %v", fi.path, err) } @@ -167,14 +250,14 @@ func (fi *finalizeFileInfo) toDirectory(fs *FileSystem) (*Directory, error) { if fi.isRoot { parentEntry = fi } - parent, err = parentEntry.toDirectoryEntry(fs, false, true) + parent, err = parentEntry.toDirectoryEntry(fsm, false, true) if err != nil { return nil, fmt.Errorf("could not convert parent entry %s to dirEntry: %v", fi.parent.path, err) } entries := []*directoryEntry{self, parent} for _, child := range fi.children { - dirEntry, err = child.toDirectoryEntry(fs, false, false) + dirEntry, err = child.toDirectoryEntry(fsm, false, false) if err != nil { return nil, fmt.Errorf("could not convert child entry %s to dirEntry: %v", child.path, err) } @@ -188,10 +271,10 @@ func (fi *finalizeFileInfo) toDirectory(fs *FileSystem) (*Directory, error) { } // calculate the size of a directory entry single record -func (fi *finalizeFileInfo) calculateRecordSize(fs *FileSystem, isSelf, isParent bool) (dirEntrySize, continuationBlocksSize int, err error) { +func (fi *finalizeFileInfo) calculateRecordSize(fsm *FileSystem, isSelf, isParent bool) (dirEntrySize, continuationBlocksSize int, err error) { // we do not actually need the the continuation blocks to calculate size, just length, so use an empty slice extTmpBlocks := make([]uint32, 100) - dirEntry, err := fi.toDirectoryEntry(fs, isSelf, isParent) + dirEntry, err := fi.toDirectoryEntry(fsm, isSelf, isParent) if err != nil { return 0, 0, fmt.Errorf("could not convert to dirEntry: %v", err) } @@ -205,21 +288,21 @@ func (fi *finalizeFileInfo) calculateRecordSize(fs *FileSystem, isSelf, isParent } // calculate the size of a directory, similar to a file size -func (fi *finalizeFileInfo) calculateDirectorySize(fs *FileSystem) (dirEntrySize, continuationBlocksSize int, err 
error) { +func (fi *finalizeFileInfo) calculateDirectorySize(fsm *FileSystem) (dirEntrySize, continuationBlocksSize int, err error) { var ( recSize, recCE int ) if !fi.IsDir() { - return 0, 0, fmt.Errorf("cannot convert a file entry to a directtory") + return 0, 0, fmt.Errorf("cannot convert a file entry to a directory") } - recSize, recCE, err = fi.calculateRecordSize(fs, true, false) + recSize, recCE, err = fi.calculateRecordSize(fsm, true, false) if err != nil { return 0, 0, fmt.Errorf("could not calculate self entry size %s: %v", fi.path, err) } dirEntrySize += recSize continuationBlocksSize += recCE - recSize, recCE, err = fi.calculateRecordSize(fs, false, true) + recSize, recCE, err = fi.calculateRecordSize(fsm, false, true) if err != nil { return 0, 0, fmt.Errorf("could not calculate parent entry size %s: %v", fi.path, err) } @@ -228,13 +311,13 @@ func (fi *finalizeFileInfo) calculateDirectorySize(fs *FileSystem) (dirEntrySize for _, e := range fi.children { // get size of data and CE blocks - recSize, recCE, err = e.calculateRecordSize(fs, false, false) + recSize, recCE, err = e.calculateRecordSize(fsm, false, false) if err != nil { return 0, 0, fmt.Errorf("could not calculate child %s entry size %s: %v", e.path, fi.path, err) } // do not go over a block boundary; pad if necessary newSize := dirEntrySize + recSize - blocksize := int(fs.blocksize) + blocksize := int(fsm.blocksize) left := blocksize - dirEntrySize%blocksize if left != 0 && newSize/blocksize > dirEntrySize/blocksize { dirEntrySize += left @@ -341,21 +424,21 @@ func (fi *finalizeFileInfo) addChild(entry *finalizeFileInfo) { // Finalize finalize a read-only filesystem by writing it out to a read-only format // //nolint:gocyclo // this finalize function is complex and needs to be. We might be better off refactoring it to multiple functions, but it does not buy all that much. 
-func (fs *FileSystem) Finalize(options FinalizeOptions) error { - if fs.workspace == "" { +func (fsm *FileSystem) Finalize(options FinalizeOptions) error { + if fsm.workspace == "" { return fmt.Errorf("cannot finalize an already finalized filesystem") } // did we ask for susp? if options.RockRidge { - fs.suspEnabled = true - fs.suspExtensions = append(fs.suspExtensions, getRockRidgeExtension(rockRidge112)) + fsm.suspEnabled = true + fsm.suspExtensions = append(fsm.suspExtensions, getRockRidgeExtension(rockRidge112)) } /* There is nothing in the iso9660 spec about the order of directories and files, other than that they must be accessible in the location specified in directory entry and/or path table - However, most implementations seem to it as follows: + However, most implementations seem to do it as follows: - each directory follows its parent - data (i.e. file) sectors in each directory are immediately after its directory and immediately before the next sibling directory to its parent @@ -380,11 +463,11 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { 10- write volume descriptor set terminator */ - f := fs.file - blocksize := int(fs.blocksize) + f := fsm.file + blocksize := int(fsm.blocksize) // 1- blank out sectors 0-15 - b := make([]byte, dataStartSector*fs.blocksize) + b := make([]byte, dataStartSector*fsm.blocksize) n, err := f.WriteAt(b, 0) if err != nil { return fmt.Errorf("could not write blank system area: %v", err) @@ -394,7 +477,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { } // 3- build out file tree - fileList, dirList, err := walkTree(fs.Workspace()) + fileList, dirList, err := walkTree(fsm.Workspace()) if err != nil { return fmt.Errorf("error walking tree: %v", err) } @@ -406,9 +489,9 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { // if we need to relocate directories, must do them here, before finalizing order and sizes // do not bother if enabled DeepDirectories, i.e. 
non-ISO9660 compliant if !options.DeepDirectories { - if fs.suspEnabled { + if fsm.suspEnabled { var handler suspExtension - for _, e := range fs.suspExtensions { + for _, e := range fsm.suspExtensions { if e.Relocatable() { handler = e break @@ -431,7 +514,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { // convert sizes to required blocks for files for _, e := range fileList { - e.blocks = calculateBlocks(e.size, fs.blocksize) + e.blocks = calculateBlocks(e.size, fsm.blocksize) } // we now have list of all of the files and directories and their properties, as well as children of every directory @@ -467,14 +550,18 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { shortname, extension := calculateShortnameExtension(path.Base(catname)) // break down the catalog basename from the parent dir catSize := int64(len(bootcat)) + now := time.Now() catEntry = &finalizeFileInfo{ - content: bootcat, - size: catSize, - path: catname, - name: path.Base(catname), - shortname: shortname, - extension: extension, - blocks: calculateBlocks(catSize, fs.blocksize), + content: bootcat, + size: catSize, + path: catname, + name: path.Base(catname), + shortname: shortname, + extension: extension, + blocks: calculateBlocks(catSize, fsm.blocksize), + modTime: now, + accessTime: now, + changeTime: now, } // make it the first file files = append([]*finalizeFileInfo{catEntry}, files...) 
@@ -503,8 +590,11 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { return fmt.Errorf("unable to find image child %s: %v", e.BootFile, err) } } + if child == nil { + return fmt.Errorf("unable to find image child %s: %v", e.BootFile, err) + } // save the child so we can add location late - e.size = uint16(child.size) + e.size = uint32(child.size) child.elToritoEntry = e } } @@ -512,7 +602,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { var size, ceBlocks int for _, dir := range dirs { dir.location = location - size, ceBlocks, err = dir.calculateDirectorySize(fs) + size, ceBlocks, err = dir.calculateDirectorySize(fsm) if err != nil { return fmt.Errorf("unable to calculate size of directory for %s: %v", dir.path, err) } @@ -566,7 +656,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { for _, e := range dirs { writeAt := int64(e.location) * int64(blocksize) var d *Directory - d, err = e.toDirectory(fs) + d, err = e.toDirectory(fsm) if err != nil { return fmt.Errorf("unable to convert entry to directory: %v", err) } @@ -601,13 +691,14 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { }() for _, e := range files { var ( - from *os.File - copied int + from *os.File + copied int + bootTableMinSize int ) writeAt := int64(e.location) * int64(blocksize) if e.content == nil { // for file, just copy the data across - from, err = os.Open(path.Join(fs.workspace, e.path)) + from, err = os.Open(path.Join(fsm.workspace, e.path)) if err != nil { return fmt.Errorf("failed to open file for reading %s: %v", e.path, err) } @@ -617,21 +708,23 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { var count int // first 8 bytes - count, err = copyFileData(from, f, 0, writeAt, 8) + count, err = copyFileData(from, f, 0, writeAt, elToritoBootTableOffset) if err != nil { return fmt.Errorf("failed to copy first bytes 0-8 of boot file to disk %s: %v", e.path, err) } copied += count // insert El Torito Boot 
Information Table - bootTable, err := e.elToritoEntry.generateBootTable(dataStartSector, path.Join(fs.workspace, e.path)) + bootTable, err := e.elToritoEntry.generateBootTable(dataStartSector, path.Join(fsm.workspace, e.path)) if err != nil { return fmt.Errorf("failed to generate boot table for %s: %v", e.path, err) } - count, err = f.WriteAt(bootTable, writeAt+8) + count, err = f.WriteAt(bootTable, writeAt+elToritoBootTableOffset) if err != nil { return fmt.Errorf("failed to write 56 byte boot table to disk %s: %v", e.path, err) } copied += count + // file with boot table file must be a minimum of boot table size and the offset + bootTableMinSize = count // remainder of file count, err = copyFileData(from, f, 64, writeAt+64, 0) if err != nil { @@ -644,8 +737,12 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { return fmt.Errorf("failed to copy file to disk %s: %v", e.path, err) } } - if copied != int(e.Size()) { - return fmt.Errorf("error copying file %s to disk, copied %d bytes, expected %d", e.path, copied, e.Size()) + targetSize := e.Size() + if targetSize < int64(bootTableMinSize) { + targetSize = int64(bootTableMinSize) + } + if copied != int(targetSize) { + return fmt.Errorf("error copying file %s to disk, copied %d bytes, expected %d", e.path, copied, targetSize) } } else { copied = len(e.content) @@ -665,7 +762,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { location = dataStartSector // create and write the primary volume descriptor, supplementary and boot, and volume descriptor set terminator now := time.Now() - rootDE, err := root.toDirectoryEntry(fs, true, false) + rootDE, err := root.toDirectoryEntry(fsm, true, false) if err != nil { return fmt.Errorf("could not convert root entry for primary volume descriptor to dirEntry: %v", err) } @@ -676,7 +773,7 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { volumeSize: totalSize, setSize: 1, sequenceNumber: 1, - blocksize: uint16(fs.blocksize), + 
blocksize: uint16(fsm.blocksize), pathTableSize: uint32(pathTableSize), pathTableLLocation: pathTableLLocation, pathTableLOptionalLocation: 0, @@ -710,10 +807,10 @@ func (fs *FileSystem) Finalize(options FinalizeOptions) error { b = terminator.toBytes() _, _ = f.WriteAt(b, int64(location)*int64(blocksize)) - _ = os.RemoveAll(fs.workspace) + _ = os.RemoveAll(fsm.workspace) // finish by setting as finalized - fs.workspace = "" + fsm.workspace = "" return nil } @@ -772,16 +869,16 @@ func sortFinalizeFileInfoPathTable(left, right *finalizeFileInfo) bool { // create a path table from a slice of *finalizeFileInfo that are directories func createPathTable(fi []*finalizeFileInfo) *pathTable { // copy so we do not modify the original - fs := make([]*finalizeFileInfo, len(fi)) - copy(fs, fi) + fis := make([]*finalizeFileInfo, len(fi)) + copy(fis, fi) // sort via the rules - sort.Slice(fs, func(i, j int) bool { - return sortFinalizeFileInfoPathTable(fs[i], fs[j]) + sort.Slice(fis, func(i, j int) bool { + return sortFinalizeFileInfoPathTable(fis[i], fis[j]) }) indexMap := make(map[*finalizeFileInfo]int) // now that it is sorted, create the ordered path table entries entries := make([]*pathTableEntry, 0) - for i, e := range fs { + for i, e := range fis { name := e.Name() nameSize := len(name) size := 8 + uint16(nameSize) @@ -811,28 +908,34 @@ func createPathTable(fi []*finalizeFileInfo) *pathTable { } func walkTree(workspace string) ([]*finalizeFileInfo, map[string]*finalizeFileInfo, error) { - cwd, err := os.Getwd() - if err != nil { - return nil, nil, fmt.Errorf("could not get pwd: %v", err) - } - // make everything relative to the workspace - _ = os.Chdir(workspace) - dirList := make(map[string]*finalizeFileInfo) - fileList := make([]*finalizeFileInfo, 0) - var entry *finalizeFileInfo - err = filepath.Walk(".", func(fp string, fi os.FileInfo, err error) error { + var ( + dirList = make(map[string]*finalizeFileInfo) + fileList = make([]*finalizeFileInfo, 0) + entry 
*finalizeFileInfo + serial uint64 + ) + err := filepath.WalkDir(workspace, func(actualPath string, d fs.DirEntry, err error) error { if err != nil { - return fmt.Errorf("error walking path %s: %v", fp, err) + return fmt.Errorf("error walking path %s: %v", actualPath, err) + } + fp := strings.TrimPrefix(actualPath, workspace) + fp = strings.TrimPrefix(fp, string(filepath.Separator)) + if fp == "" { + fp = "." } - isRoot := fp == "." - name := fi.Name() - shortname, extension := calculateShortnameExtension(name) + name := d.Name() + _, extension := calculateShortnameExtension(name) - if isRoot { - name = string([]byte{0x00}) - shortname = name + fi, err := d.Info() + if err != nil { + return fmt.Errorf("could not get file info for %s: %v", fp, err) + } + entry, err = finalizeFileInfoFromFile(fp, actualPath, fi) + if err != nil { + return err } - entry = &finalizeFileInfo{path: fp, name: name, isDir: fi.IsDir(), isRoot: isRoot, modTime: fi.ModTime(), mode: fi.Mode(), size: fi.Size(), shortname: shortname} + entry.serial = serial + serial++ // we will have to save it as its parent parentDir := filepath.Dir(fp) @@ -841,7 +944,7 @@ func walkTree(workspace string) ([]*finalizeFileInfo, map[string]*finalizeFileIn if fi.IsDir() { entry.children = make([]*finalizeFileInfo, 0, 20) dirList[fp] = entry - if !isRoot { + if !entry.isRoot { parentDirInfo.children = append(parentDirInfo.children, entry) dirList[parentDir] = parentDirInfo } @@ -858,8 +961,6 @@ func walkTree(workspace string) ([]*finalizeFileInfo, map[string]*finalizeFileIn if err != nil { return nil, nil, err } - // reset the workspace - _ = os.Chdir(cwd) return fileList, dirList, nil } diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/iso9660.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/iso9660.go index c6c1321c646..64b28ed2ee1 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/iso9660.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/iso9660.go @@ -34,15 
+34,15 @@ type FileSystem struct { } // Equal compare if two filesystems are equal -func (fs *FileSystem) Equal(a *FileSystem) bool { - localMatch := fs.file == a.file && fs.size == a.size - vdMatch := fs.volumes.equal(&a.volumes) +func (fsm *FileSystem) Equal(a *FileSystem) bool { + localMatch := fsm.file == a.file && fsm.size == a.size + vdMatch := fsm.volumes.equal(&a.volumes) return localMatch && vdMatch } // Workspace get the workspace path -func (fs *FileSystem) Workspace() string { - return fs.workspace +func (fsm *FileSystem) Workspace() string { + return fsm.workspace } // Create creates an ISO9660 filesystem in a given directory @@ -283,7 +283,7 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) { } // Type returns the type code for the filesystem. Always returns filesystem.TypeFat32 -func (fs *FileSystem) Type() filesystem.Type { +func (fsm *FileSystem) Type() filesystem.Type { return filesystem.TypeISO9660 } @@ -293,11 +293,11 @@ func (fs *FileSystem) Type() filesystem.Type { // * It will not return an error if the path already exists // // if readonly and not in workspace, will return an error -func (fs *FileSystem) Mkdir(p string) error { - if fs.workspace == "" { +func (fsm *FileSystem) Mkdir(p string) error { + if fsm.workspace == "" { return fmt.Errorf("cannot write to read-only filesystem") } - err := os.MkdirAll(path.Join(fs.workspace, p), 0o755) + err := os.MkdirAll(path.Join(fsm.workspace, p), 0o755) if err != nil { return fmt.Errorf("could not create directory %s: %v", p, err) } @@ -310,12 +310,12 @@ func (fs *FileSystem) Mkdir(p string) error { // Returns a slice of os.FileInfo with all of the entries in the directory. 
// // Will return an error if the directory does not exist or is a regular file and not a directory -func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) { +func (fsm *FileSystem) ReadDir(p string) ([]os.FileInfo, error) { var fi []os.FileInfo // non-workspace: read from iso9660 // workspace: read from regular filesystem - if fs.workspace != "" { - fullPath := path.Join(fs.workspace, p) + if fsm.workspace != "" { + fullPath := path.Join(fsm.workspace, p) // read the entries dirEntries, err := os.ReadDir(fullPath) if err != nil { @@ -329,7 +329,7 @@ func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) { fi = append(fi, info) } } else { - dirEntries, err := fs.readDirectory(p) + dirEntries, err := fsm.readDirectory(p) if err != nil { return nil, fmt.Errorf("error reading directory %s: %v", p, err) } @@ -351,7 +351,7 @@ func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) { // accepts normal os.OpenFile flags // // returns an error if the file does not exist -func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { +func (fsm *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { var f filesystem.File var err error @@ -366,14 +366,14 @@ func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { // cannot open to write or append or create if we do not have a workspace writeMode := flag&os.O_WRONLY != 0 || flag&os.O_RDWR != 0 || flag&os.O_APPEND != 0 || flag&os.O_CREATE != 0 || flag&os.O_TRUNC != 0 || flag&os.O_EXCL != 0 - if fs.workspace == "" { + if fsm.workspace == "" { if writeMode { return nil, fmt.Errorf("cannot write to read-only filesystem") } // get the directory entries var entries []*directoryEntry - entries, err = fs.readDirectory(dir) + entries, err = fsm.readDirectory(dir) if err != nil { return nil, fmt.Errorf("could not read directory entries for %s", dir) } @@ -405,7 +405,7 @@ func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { offset: 0, } } 
else { - f, err = os.OpenFile(path.Join(fs.workspace, p), flag, 0o644) + f, err = os.OpenFile(path.Join(fsm.workspace, p), flag, 0o644) if err != nil { return nil, fmt.Errorf("target file %s does not exist: %v", p, err) } @@ -415,7 +415,7 @@ func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { } // readDirectory - read directory entry on iso only (not workspace) -func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) { +func (fsm *FileSystem) readDirectory(p string) ([]*directoryEntry, error) { var ( location, size uint32 err error @@ -424,7 +424,7 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) { // try from path table, then walk the directory tree, unless we were told explicitly not to usePathtable := true - for _, e := range fs.suspExtensions { + for _, e := range fsm.suspExtensions { usePathtable = e.UsePathtable() if !usePathtable { break @@ -432,14 +432,14 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) { } if usePathtable { - location = fs.pathTable.getLocation(p) + location = fsm.pathTable.getLocation(p) } // if we found it, read the first directory entry to get the size if location != 0 { // we need 4 bytes to read the size of the directory; it is at offset 10 from beginning dirb := make([]byte, 4) - n, err = fs.file.ReadAt(dirb, int64(location)*fs.blocksize+10) + n, err = fsm.file.ReadAt(dirb, int64(location)*fsm.blocksize+10) if err != nil { return nil, fmt.Errorf("could not read directory %s: %v", p, err) } @@ -451,7 +451,7 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) { } else { // if we could not find the location in the path table, try reading directly from the disk // it is slow, but this is how Unix does it, since many iso creators *do* create illegitimate disks - location, size, err = fs.rootDir.getLocation(p) + location, size, err = fsm.rootDir.getLocation(p) if err != nil { return nil, fmt.Errorf("unable to read 
directory tree for %s: %v", p, err) } @@ -464,7 +464,7 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) { // we have a location, let's read the directories from it b := make([]byte, size) - n, err = fs.file.ReadAt(b, int64(location)*fs.blocksize) + n, err = fsm.file.ReadAt(b, int64(location)*fsm.blocksize) if err != nil { return nil, fmt.Errorf("could not read directory entries for %s: %v", p, err) } @@ -472,7 +472,7 @@ func (fs *FileSystem) readDirectory(p string) ([]*directoryEntry, error) { return nil, fmt.Errorf("reading directory %s returned %d bytes read instead of expected %d", p, n, size) } // parse the entries - entries, err := parseDirEntries(b, fs) + entries, err := parseDirEntries(b, fsm) if err != nil { return nil, fmt.Errorf("could not parse directory entries for %s: %v", p, err) } @@ -488,13 +488,13 @@ func validateBlocksize(blocksize int64) error { } } -func (fs *FileSystem) Label() string { - if fs.volumes.primary == nil { +func (fsm *FileSystem) Label() string { + if fsm.volumes.primary == nil { return "" } - return fs.volumes.primary.volumeIdentifier + return fsm.volumes.primary.volumeIdentifier } -func (fs *FileSystem) SetLabel(string) error { +func (fsm *FileSystem) SetLabel(string) error { return fmt.Errorf("ISO9660 filesystem is read-only") } diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/rockridge.go b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/rockridge.go index 8a8c80a1340..ed20a1d7971 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/rockridge.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/iso9660/rockridge.go @@ -6,8 +6,6 @@ import ( "os" "sort" "time" - - "gopkg.in/djherbis/times.v1" ) const ( @@ -96,53 +94,37 @@ func (r *rockRidgeExtension) GetFilename(de *directoryEntry) (string, error) { } return name, nil } -func (r *rockRidgeExtension) GetFileExtensions(fp string, isSelf, isParent bool) ([]directoryEntrySystemUseExtension, error) { +func (r 
*rockRidgeExtension) GetFileExtensions(ffi *finalizeFileInfo, isSelf, isParent bool) ([]directoryEntrySystemUseExtension, error) { // we always do PX, TF, NM, SL order ret := []directoryEntrySystemUseExtension{} - // do not follow symlinks - fi, err := os.Lstat(fp) - if err != nil { - return nil, fmt.Errorf("error reading file %s: %v", fp, err) - } - - t, err := times.Lstat(fp) - if err != nil { - return nil, fmt.Errorf("error reading times %s: %v", fp, err) - } // PX - nlink, uid, gid := statt(fi) - mtime := fi.ModTime() - atime := t.AccessTime() - ctime := t.ChangeTime() + mtime := ffi.ModTime() ret = append(ret, rockRidgePosixAttributes{ - mode: fi.Mode(), - linkCount: nlink, - uid: uid, - gid: gid, + mode: ffi.Mode(), + linkCount: ffi.Nlink(), + uid: ffi.UID(), + gid: ffi.GID(), length: r.pxLength, + serial: ffi.serial, }) // TF tf := rockRidgeTimestamps{longForm: false, stamps: []rockRidgeTimestamp{ {timestampType: rockRidgeTimestampModify, time: mtime}, - {timestampType: rockRidgeTimestampAccess, time: atime}, - {timestampType: rockRidgeTimestampAttribute, time: ctime}, + {timestampType: rockRidgeTimestampAccess, time: ffi.AccessTime()}, + {timestampType: rockRidgeTimestampAttribute, time: ffi.ChangeTime()}, }} ret = append(ret, tf) // NM if !isSelf && !isParent { - ret = append(ret, rockRidgeName{name: fi.Name()}) + ret = append(ret, rockRidgeName{name: ffi.name}) } // SL - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + if ffi.Mode()&os.ModeSymlink == os.ModeSymlink { // need the target if it is a symlink - target, err := os.Readlink(fp) - if err != nil { - return nil, fmt.Errorf("error reading symlink target at %s", fp) - } - ret = append(ret, rockRidgeSymlink{continued: false, name: target}) + ret = append(ret, rockRidgeSymlink{continued: false, name: ffi.LinkTarget()}) } return ret, nil @@ -293,7 +275,7 @@ type rockRidgePosixAttributes struct { linkCount uint32 uid uint32 gid uint32 - serial uint32 + serial uint64 } func (d rockRidgePosixAttributes) 
Equal(o directoryEntrySystemUseExtension) bool { @@ -362,8 +344,7 @@ func (d rockRidgePosixAttributes) Data() []byte { binary.LittleEndian.PutUint32(ret[24:28], d.gid) binary.BigEndian.PutUint32(ret[28:32], d.gid) if d.length == 44 { - binary.LittleEndian.PutUint32(ret[32:36], d.serial) - binary.BigEndian.PutUint32(ret[36:40], d.serial) + binary.LittleEndian.PutUint64(ret[32:40], d.serial) } return ret } @@ -427,9 +408,9 @@ func (r *rockRidgeExtension) parsePosixAttributes(b []byte) (directoryEntrySyste m |= uint32(os.ModeNamedPipe) } - var serial uint32 + var serial uint64 if len(b) == 44 { - serial = binary.LittleEndian.Uint32(b[36:40]) + serial = binary.LittleEndian.Uint64(b[36:44]) } return rockRidgePosixAttributes{ mode: os.FileMode(m), diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/compressor.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/compressor.go index b608d551604..173edabf80f 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/compressor.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/compressor.go @@ -7,6 +7,7 @@ import ( "fmt" "io" + "github.com/klauspost/compress/zstd" "github.com/pierrec/lz4/v4" "github.com/ulikunitz/xz" "github.com/ulikunitz/xz/lzma" @@ -51,6 +52,8 @@ func (c *CompressorLzma) decompress(in []byte) ([]byte, error) { } return p, nil } + +//nolint:unused,revive // it is important to implement the interface func (c *CompressorLzma) loadOptions(b []byte) error { // lzma has no supported optiosn return nil @@ -107,6 +110,9 @@ func (c *CompressorGzip) decompress(in []byte) ([]byte, error) { if err != nil { return nil, fmt.Errorf("error decompressing: %v", err) } + if err := gz.Close(); err != nil { + return nil, err + } return p, nil } @@ -326,6 +332,32 @@ func (c *CompressorZstd) optionsBytes() []byte { func (c *CompressorZstd) flavour() compression { return compressionZstd } +func (c *CompressorZstd) compress(in []byte) ([]byte, error) { + var b bytes.Buffer + z, err := 
zstd.NewWriter(&b) + if err != nil { + return nil, fmt.Errorf("failed to create zstd compressor: %w", err) + } + if _, err := z.Write(in); err != nil { + return nil, err + } + if err := z.Close(); err != nil { + return nil, err + } + return b.Bytes(), nil +} +func (c *CompressorZstd) decompress(in []byte) ([]byte, error) { + z, err := zstd.NewReader(nil) + if err != nil { + return nil, fmt.Errorf("failed to create zstd decompressor: %w", err) + } + defer z.Close() + p, err := z.DecodeAll(in, nil) + if err != nil { + return nil, fmt.Errorf("error decompressing zstd: %w", err) + } + return p, nil +} func newCompressor(flavour compression) (Compressor, error) { var c Compressor @@ -343,7 +375,7 @@ func newCompressor(flavour compression) (Compressor, error) { case compressionLz4: c = &CompressorLz4{} case compressionZstd: - return nil, fmt.Errorf("zstd compression not yet supported") + c = &CompressorZstd{} default: return nil, fmt.Errorf("unknown compression type: %d", flavour) } diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directory.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directory.go index 0f5bd8d5c34..ce7627b382d 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directory.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directory.go @@ -54,18 +54,17 @@ type directoryEntryGroup struct { // parse raw bytes of a directory to get the contents func parseDirectory(b []byte) (*directory, error) { - // must have at least one header - if _, err := parseDirectoryHeader(b); err != nil { - return nil, fmt.Errorf("could not parse directory header: %v", err) - } entries := make([]*directoryEntryRaw, 0) - for pos := 0; pos+dirHeaderSize < len(b); { + for pos := 0; pos+dirHeaderSize <= len(b); { directoryHeader, err := parseDirectoryHeader(b[pos:]) if err != nil { return nil, fmt.Errorf("could not parse directory header: %v", err) } - if directoryHeader.count+1 > maxDirEntries { - return nil, 
fmt.Errorf("corrupted directory, had %d entries instead of max %d", directoryHeader.count+1, maxDirEntries) + if directoryHeader.count == 0 { + return nil, fmt.Errorf("corrupted directory, must have at least one entry") + } + if directoryHeader.count > maxDirEntries { + return nil, fmt.Errorf("corrupted directory, had %d entries instead of max %d", directoryHeader.count, maxDirEntries) } pos += dirHeaderSize for count := uint32(0); count < directoryHeader.count; count++ { diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directoryentry.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directoryentry.go index cc5beb75de4..040212a5825 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directoryentry.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/directoryentry.go @@ -1,50 +1,16 @@ package squashfs import ( + "fmt" + "io/fs" "os" "time" + + "github.com/diskfs/go-diskfs/filesystem" ) // FileStat is the extended data underlying a single file, similar to https://golang.org/pkg/syscall/#Stat_t -type FileStat struct { - uid uint32 - gid uint32 - xattrs map[string]string -} - -func (f *FileStat) equal(o *FileStat) bool { - if f.uid != o.uid || f.gid != o.gid { - return false - } - if len(f.xattrs) != len(o.xattrs) { - return false - } - for k, v := range f.xattrs { - ov, ok := o.xattrs[k] - if !ok { - return false - } - if ov != v { - return false - } - } - return true -} - -// UID get uid of file -func (f *FileStat) UID() uint32 { - return f.uid -} - -// GID get gid of file -func (f *FileStat) GID() uint32 { - return f.gid -} - -// Xattrs get extended attributes of file -func (f *FileStat) Xattrs() map[string]string { - return f.xattrs -} +type FileStat = *directoryEntry // directoryEntry is a single directory entry // it combines information from inode and the actual entry @@ -57,22 +23,22 @@ func (f *FileStat) Xattrs() map[string]string { // IsDir() bool // abbreviation for Mode().IsDir() // Sys() interface{} 
// underlying data source (can return nil) type directoryEntry struct { - isSubdirectory bool + fs *FileSystem // the FileSystem this entry is part of name string size int64 modTime time.Time mode os.FileMode inode inode - sys FileStat + uid uint32 + gid uint32 + xattrs map[string]string + isSubdirectory bool } func (d *directoryEntry) equal(o *directoryEntry) bool { if o == nil { return false } - if !d.sys.equal(&o.sys) { - return false - } if d.inode == nil && o.inode == nil { return true } @@ -107,10 +73,127 @@ func (d *directoryEntry) ModTime() time.Time { // Mode FileMode // file mode bits func (d *directoryEntry) Mode() os.FileMode { - return d.mode + mode := d.mode + + // We need to adjust the Linux mode into a Go mode + // The bottom 3*3 bits are the traditional unix permissions. + + // Clear the non permissions bits + mode &= os.ModePerm + + if d.inode == nil { + return mode + } + switch d.inode.inodeType() { + case inodeBasicDirectory, inodeExtendedDirectory: + mode |= os.ModeDir // d: is a directory + case inodeBasicFile, inodeExtendedFile: + // zero mode + case inodeBasicSymlink, inodeExtendedSymlink: + mode |= os.ModeSymlink // L: symbolic link + case inodeBasicBlock, inodeExtendedBlock: + mode |= os.ModeDevice // D: device file + case inodeBasicChar, inodeExtendedChar: + mode |= os.ModeDevice // D: device file + mode |= os.ModeCharDevice // c: Unix character device, when ModeDevice is set + case inodeBasicFifo, inodeExtendedFifo: + mode |= os.ModeNamedPipe // p: named pipe (FIFO) + case inodeBasicSocket, inodeExtendedSocket: + mode |= os.ModeSocket // S: Unix domain socket + default: + mode |= os.ModeIrregular // ?: non-regular file; nothing else is known about this file + } + + // Not currently translated + // mode |= os.ModeAppend // a: append-only + // mode |= os.ModeExclusive // l: exclusive use + // mode |= os.ModeTemporary // T: temporary file; Plan 9 only + // mode |= os.ModeSetuid // u: setuid + // mode |= os.ModeSetgid // g: setgid + // mode 
|= os.ModeSticky // t: sticky + + return mode } // Sys interface{} // underlying data source (can return nil) func (d *directoryEntry) Sys() interface{} { - return d.sys + return d +} + +// UID get uid of file +func (d *directoryEntry) UID() uint32 { + return d.uid +} + +// GID get gid of file +func (d *directoryEntry) GID() uint32 { + return d.gid +} + +// Xattrs get extended attributes of file +func (d *directoryEntry) Xattrs() map[string]string { + return d.xattrs +} + +// Readlink returns the destination of the symbolic link if this entry +// is a symbolic link. +// +// If this entry is not a symbolic link then it will return fs.ErrNotExist +func (d *directoryEntry) Readlink() (string, error) { + var target string + body := d.inode.getBody() + //nolint:exhaustive // all other cases fall under default + switch d.inode.inodeType() { + case inodeBasicSymlink: + link, ok := body.(*basicSymlink) + if !ok { + return "", fmt.Errorf("internal error: inode wasn't basic symlink: %T", body) + } + target = link.target + case inodeExtendedSymlink: + link, ok := body.(*extendedSymlink) + if !ok { + return "", fmt.Errorf("internal error: inode wasn't extended symlink: %T", body) + } + target = link.target + default: + return "", fs.ErrNotExist + } + return target, nil +} + +// Open returns an filesystem.File from which you can read the +// contents of a file. +// +// Calling this on anything but a file will return an error. +// +// Calling this Open method is more efficient than calling +// FileSystem.OpenFile as it doesn't have to find the file by +// traversing the directory entries first. 
+func (d *directoryEntry) Open() (filesystem.File, error) { + // get the inode data for this file + // now open the file + // get the inode for the file + var eFile *extendedFile + in := d.inode + iType := in.inodeType() + body := in.getBody() + //nolint:exhaustive // all other cases fall under default + switch iType { + case inodeBasicFile: + extFile := body.(*basicFile).toExtended() + eFile = &extFile + case inodeExtendedFile: + eFile, _ = body.(*extendedFile) + default: + return nil, fmt.Errorf("inode is of type %d, neither basic nor extended file", iType) + } + + return &File{ + extendedFile: eFile, + isReadWrite: false, + isAppend: false, + offset: 0, + filesystem: d.fs, + }, nil } diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/file.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/file.go index 93487dd3ec2..1b77d9698b7 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/file.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/file.go @@ -13,10 +13,12 @@ import ( // include all of the data type File struct { *extendedFile - isReadWrite bool - isAppend bool - offset int64 - filesystem *FileSystem + isReadWrite bool + isAppend bool + offset int64 + filesystem *FileSystem + blockLocation int64 // the position of the last block decompressed + block []byte // the actual last block decompressed } // Read reads up to len(b) bytes from the File. @@ -42,9 +44,9 @@ func (fl *File) Read(b []byte) (int, error) { // e.g. 
if starting block is at position 10245, then we want blocks 27,28,29 from the disk // 5- read in and uncompress the necessary blocks fs := fl.filesystem - size := int(fl.size()) - int(fl.offset) + size := fl.size() - fl.offset location := int64(fl.startBlock) - maxRead := size + maxRead := len(b) // if there is nothing left to read, just return EOF if size <= 0 { @@ -54,14 +56,14 @@ func (fl *File) Read(b []byte) (int, error) { // we stop when we hit the lesser of // 1- len(b) // 2- file end - if len(b) < maxRead { - maxRead = len(b) + if size < int64(maxRead) { + maxRead = int(size) } // just read the requested number of bytes and change our offset // figure out which block number has the bytes we are looking for startBlock := int(fl.offset / fs.blocksize) - endBlock := int((fl.offset + int64(maxRead)) / fs.blocksize) + endBlock := int((fl.offset + int64(maxRead) - 1) / fs.blocksize) // do we end in fragment territory? fragments := false @@ -71,46 +73,80 @@ func (fl *File) Read(b []byte) (int, error) { } read := 0 - offset := fl.offset + offsetEnd := fl.offset + int64(maxRead) + pos := int64(0) + + // send input to b, clipping as appropriate + outputBlock := func(input []byte) { + inputSize := int64(len(input)) + start := fl.offset - pos + end := offsetEnd - pos + if start >= 0 && start < inputSize { + if end > inputSize { + end = inputSize + } + n := copy(b[read:], input[start:end]) + read += n + fl.offset += int64(n) + } + } + // we need to cycle through all of the blocks to find where the desired one starts for i, block := range fl.blockSizes { - if i > endBlock || read > maxRead { + if i > endBlock || read >= maxRead { break } // if we are in the range of desired ones, read it in if i >= startBlock { - input, err := fs.readBlock(location, block.compressed, block.size) - if err != nil { - return read, fmt.Errorf("error reading data block %d from squashfs: %v", i, err) + if int64(block.size) > fs.blocksize { + return read, fmt.Errorf("unexpected block.size=%d > 
fs.blocksize=%d", block.size, fs.blocksize) } - // we do not need to limit it to the remaining space of b, since copy() only will copy - // to what space it has in b - copy(b[read:], input[offset:]) - read += len(input) - fl.offset += int64(read) - offset = 0 + var input []byte + if fl.blockLocation == location && fl.block != nil { + // Read last block from cache + input = fl.block + } else { + var err error + input, err = fs.readBlock(location, block.compressed, block.size) + if err != nil { + return read, fmt.Errorf("error reading data block %d from squashfs: %v", i, err) + } + // Cache the last block + fl.blockLocation = location + fl.block = input + } + outputBlock(input) } location += int64(block.size) + pos += fs.blocksize } + // did we have a fragment to read? - if fragments { + if read < maxRead && fragments { + if fl.fragmentBlockIndex == 0xffffffff { + return read, fmt.Errorf("expecting fragment to read %d bytes but no fragment found", maxRead-read) + } input, err := fs.readFragment(fl.fragmentBlockIndex, fl.fragmentOffset, fl.size()%fs.blocksize) if err != nil { return read, fmt.Errorf("error reading fragment block %d from squashfs: %v", fl.fragmentBlockIndex, err) } - copy(b[read:], input) + pos = int64(len(fl.blockSizes)) * fs.blocksize + outputBlock(input) } - fl.offset += int64(maxRead) var retErr error - if fl.offset >= int64(size) { + if fl.offset >= fl.size() { retErr = io.EOF + } else if read == 0 { + retErr = fmt.Errorf("internal error: read no bytes") } - return maxRead, retErr + return read, retErr } // Write writes len(b) bytes to the File. 
// // you cannot write to a finished squashfs, so this returns an error +// +//nolint:unused,revive // but it is important to implement the interface func (fl *File) Write(p []byte) (int, error) { return 0, fmt.Errorf("cannot write to a read-only squashfs filesystem") } diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize.go index f85c28febf3..7c99e4f2af9 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "fmt" "io" + iofs "io/fs" "os" "path" "path/filepath" @@ -415,21 +416,24 @@ func finalizeFragment(buf []byte, to util.File, toOffset int64, c Compressor) (r // because the inode data is different. // The first entry in the return always will be the root func walkTree(workspace string) ([]*finalizeFileInfo, error) { - cwd, err := os.Getwd() - if err != nil { - return nil, fmt.Errorf("could not get pwd: %v", err) - } - // make everything relative to the workspace - _ = os.Chdir(workspace) dirMap := make(map[string]*finalizeFileInfo) fileList := make([]*finalizeFileInfo, 0) var entry *finalizeFileInfo - _ = filepath.Walk(".", func(fp string, fi os.FileInfo, err error) error { + err := filepath.WalkDir(workspace, func(actualPath string, d iofs.DirEntry, err error) error { if err != nil { return err } + fp := strings.TrimPrefix(actualPath, workspace) + fp = strings.TrimPrefix(fp, string(filepath.Separator)) + if fp == "" { + fp = "." + } isRoot := fp == "." 
- name := fi.Name() + name := d.Name() + fi, err := d.Info() + if err != nil { + return fmt.Errorf("could not get file info for %s: %v", fp, err) + } m := fi.Mode() var fType fileType switch { @@ -448,7 +452,7 @@ func walkTree(workspace string) ([]*finalizeFileInfo, error) { default: fType = fileRegular } - xattrNames, err := xattr.List(fp) + xattrNames, err := xattr.List(actualPath) if err != nil { return fmt.Errorf("unable to list xattrs for %s: %v", fp, err) } @@ -495,8 +499,9 @@ func walkTree(workspace string) ([]*finalizeFileInfo, error) { fileList = append(fileList, entry) return nil }) - // reset the workspace - _ = os.Chdir(cwd) + if err != nil { + return nil, err + } return fileList, nil } @@ -750,7 +755,7 @@ func writeDirectories(dirs []*finalizeFileInfo, f util.File, compressor Compress // writeFragmentTable write the fragment table // -//nolint:unparam // this does not use fragmentBlocksStart yet, but only because we have not yet added support +//nolint:unparam,unused,revive // this does not use fragmentBlocksStart yet, but only because we have not yet added support func writeFragmentTable(fragmentBlocks []fragmentBlock, fragmentBlocksStart int64, f util.File, compressor Compressor, location int64) (fragmentsWritten int, finalLocation uint64, err error) { // now write the actual fragment table entries var ( diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize_wasip1.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize_wasip1.go new file mode 100644 index 00000000000..f848c987f0f --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/finalize_wasip1.go @@ -0,0 +1,18 @@ +//go:build wasip1 +// +build wasip1 + +//nolint:unconvert // linter gets confused in this file +package squashfs + +import ( + "errors" + "os" +) + +func getDeviceNumbers(path string) (major, minor uint32, err error) { + return 0, 0, errors.New("not implemented") +} + +func getFileProperties(fi os.FileInfo) (links, uid, gid 
uint32) { + return 0, 0, 0 +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/inode.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/inode.go index 7a690ca686c..a08852b8ba2 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/inode.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/inode.go @@ -143,11 +143,10 @@ func parseBlockData(u uint32) *blockData { size: u & 0x00ffffff, } } -func parseFileBlockSizes(b []byte, fileSize, blocksize int) []*blockData { - count := fileSize / blocksize - blocks := make([]*blockData, 0) - for j := 0; j < count && j < len(b); j += 4 { - blocks = append(blocks, parseBlockData(binary.LittleEndian.Uint32(b[j:j+4]))) +func parseFileBlockSizes(b []byte, blockListSize int) []*blockData { + blocks := make([]*blockData, 0, blockListSize) + for j := 0; j < blockListSize && j < len(b); j++ { + blocks = append(blocks, parseBlockData(binary.LittleEndian.Uint32(b[4*j:4*j+4]))) } return blocks } @@ -440,14 +439,14 @@ func parseBasicFile(b []byte, blocksize int) (*basicFile, int, error) { fileSize: fileSize, } // see how many other bytes we need to read - blockListSize := int(d.fileSize) / blocksize - if int(d.fileSize)%blocksize > 0 && d.fragmentBlockIndex != 0xffffffff { + blockListSize := int(d.fileSize / uint32(blocksize)) + if d.fileSize%uint32(blocksize) > 0 && d.fragmentBlockIndex == 0xffffffff { blockListSize++ } // do we have enough data left to read those? 
extra = blockListSize * 4 if len(b[16:]) >= extra { - d.blockSizes = parseFileBlockSizes(b[16:], int(fileSize), blocksize) + d.blockSizes = parseFileBlockSizes(b[16:], blockListSize) extra = 0 } @@ -531,14 +530,14 @@ func parseExtendedFile(b []byte, blocksize int) (*extendedFile, int, error) { xAttrIndex: binary.LittleEndian.Uint32(b[36:40]), } // see how many other bytes we need to read - blockListSize := int(d.fileSize) / blocksize - if int(d.fileSize)%blocksize > 0 && d.fragmentBlockIndex != 0xffffffff { + blockListSize := int(d.fileSize / uint64(blocksize)) + if d.fileSize%uint64(blocksize) > 0 && d.fragmentBlockIndex == 0xffffffff { blockListSize++ } // do we have enough data left to read those? extra = blockListSize * 4 - if len(b[16:]) >= extra { - d.blockSizes = parseFileBlockSizes(b[16:], int(fileSize), blocksize) + if len(b[40:]) >= extra { + d.blockSizes = parseFileBlockSizes(b[40:], blockListSize) extra = 0 } return d, extra, nil diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/lru.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/lru.go new file mode 100644 index 00000000000..9185d44c375 --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/lru.go @@ -0,0 +1,138 @@ +package squashfs + +import ( + "sync" +) + +// A simple least recently used cache +type lru struct { + mu sync.Mutex + cache map[int64]*lruBlock // cache keyed on block position in file + maxBlocks int // max number of blocks in cache + root lruBlock // root block in LRU circular list +} + +// A data block to store in the lru cache +type lruBlock struct { + mu sync.Mutex // lock while fetching + data []byte // data block - nil while being fetched + prev *lruBlock // prev block in LRU list + next *lruBlock // next block in LRU list + pos int64 // position it was read off disk + size uint16 // compressed size on disk +} + +// Create a new LRU cache of a maximum of maxBlocks blocks of size +func newLRU(maxBlocks int) *lru { + l := &lru{ + 
cache: make(map[int64]*lruBlock), + maxBlocks: maxBlocks, + root: lruBlock{ + pos: -1, + }, + } + l.root.prev = &l.root // circularly link the root node + l.root.next = &l.root + return l +} + +// Unlink the block from the list +func (l *lru) unlink(block *lruBlock) { + block.prev.next = block.next + block.next.prev = block.prev + block.prev = nil + block.next = nil +} + +// Pop a block from the end of the list +func (l *lru) pop() *lruBlock { + block := l.root.prev + if block == &l.root { + panic("internal error: list empty") + } + l.unlink(block) + return block +} + +// Add a block to the start of the list +func (l *lru) push(block *lruBlock) { + oldHead := l.root.next + l.root.next = block + block.prev = &l.root + block.next = oldHead + oldHead.prev = block +} + +// ensure there are no more than n blocks in the cache +func (l *lru) trim(maxBlocks int) { + for len(l.cache) > maxBlocks && len(l.cache) > 0 { + // Remove a block from the cache + block := l.pop() + delete(l.cache, block.pos) + } +} + +// add block to the cache, pruning the cache as appropriate +func (l *lru) add(block *lruBlock) { + l.trim(l.maxBlocks - 1) + l.cache[block.pos] = block + l.push(block) +} + +// Fetch data returning size used from input and error +// +// data should be a subslice of buf +type fetchFn func() (data []byte, size uint16, err error) + +// Get the block at pos from the cache. +// +// If it isn't found in the cache then fetch() is called to get it. +// +// This does read through caching and takes care not to block parallel +// calls to the fetch() function. 
+func (l *lru) get(pos int64, fetch fetchFn) (data []byte, size uint16, err error) { + if l == nil { + return fetch() + } + l.mu.Lock() + block, found := l.cache[pos] + if !found { + // Add an empty block with data == nil + block = &lruBlock{ + pos: pos, + } + // Add it to the cache and the tail of the list + l.add(block) + } else { + // Remove the block from the list + l.unlink(block) + // Add it back to the start + l.push(block) + } + block.mu.Lock() // transfer the lock to the block + l.mu.Unlock() + defer block.mu.Unlock() + + if block.data != nil { + return block.data, block.size, nil + } + + // Fetch the block + data, size, err = fetch() + if err != nil { + return nil, 0, err + } + block.data = data + block.size = size + return data, size, nil +} + +// Sets the number of blocks to be used in the cache +// +// It makes sure that there are no more than maxBlocks in the cache. +func (l *lru) setMaxBlocks(maxBlocks int) { + l.mu.Lock() + defer l.mu.Unlock() + l.maxBlocks = maxBlocks + l.trim(l.maxBlocks) +} diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/metadatablock.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/metadatablock.go index 6711f4ef0f5..8ef65098fd7 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/metadatablock.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/metadatablock.go @@ -26,6 +26,7 @@ func getMetadataSize(b []byte) (size uint16, compressed bool, err error) { return size, compressed, nil } +// FIXME this isn't used anywhere except in the test code func parseMetadata(b []byte, c Compressor) (block *metadatablock, err error) { if len(b) < minMetadataBlockSize { return nil, fmt.Errorf("metadata block was of len %d, less than minimum %d", len(b), minMetadataBlockSize) @@ -71,33 +72,35 @@ func (m *metadatablock) toBytes(c Compressor) ([]byte, error) { return b, nil } -func readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) { - // read 
bytes off the reader to determine how big it is and if compressed - b := make([]byte, 2) - _, _ = r.ReadAt(b, location) - size, compressed, err := getMetadataSize(b) - if err != nil { - return nil, 0, fmt.Errorf("error getting size and compression for metadata block at %d: %v", location, err) - } - b = make([]byte, size) - read, err := r.ReadAt(b, location+2) - if err != nil && err != io.EOF { - return nil, 0, fmt.Errorf("unable to read metadata block of size %d at location %d: %v", size, location, err) - } - if read != len(b) { - return nil, 0, fmt.Errorf("read %d instead of expected %d bytes for metadata block at location %d", read, size, location) - } - data = b - if compressed { - if c == nil { - return nil, 0, fmt.Errorf("metadata block at %d compressed, but no compressor provided", location) - } - data, err = c.decompress(b) +func (fs *FileSystem) readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, size uint16, err error) { + return fs.cache.get(location, func() (data []byte, size uint16, err error) { + // read bytes off the reader to determine how big it is and if compressed + b := make([]byte, 2) + _, _ = r.ReadAt(b, location) + size, compressed, err := getMetadataSize(b) if err != nil { - return nil, 0, fmt.Errorf("decompress error: %v", err) + return nil, 0, fmt.Errorf("error getting size and compression for metadata block at %d: %v", location, err) } - } - return data, size + 2, nil + b = make([]byte, size) + read, err := r.ReadAt(b, location+2) + if err != nil && err != io.EOF { + return nil, 0, fmt.Errorf("unable to read metadata block of size %d at location %d: %v", size, location, err) + } + if read != len(b) { + return nil, 0, fmt.Errorf("read %d instead of expected %d bytes for metadata block at location %d", read, size, location) + } + data = b + if compressed { + if c == nil { + return nil, 0, fmt.Errorf("metadata block at %d compressed, but no compressor provided", location) + } + data, err = c.decompress(b) + if err != nil 
{ + return nil, 0, fmt.Errorf("decompress error: %v", err) + } + } + return data, size + 2, nil + }) } // readMetadata read as many bytes of metadata as required for the given size, with the byteOffset provided as a starting @@ -105,13 +108,13 @@ func readMetaBlock(r io.ReaderAt, c Compressor, location int64) (data []byte, si // requests to read 500 bytes beginning at offset 8000 into the first block. // it always returns to the end of the block, even if that is greater than the given size. This makes it easy to use more // data than expected on first read. The consumer is expected to cut it down, if needed -func readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOffset uint32, byteOffset uint16, size int) ([]byte, error) { +func (fs *FileSystem) readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOffset uint32, byteOffset uint16, size int) ([]byte, error) { var ( b []byte blockOffset = int(initialBlockOffset) ) // we know how many blocks, so read them all in - m, read, err := readMetaBlock(r, c, firstBlock+int64(blockOffset)) + m, read, err := fs.readMetaBlock(r, c, firstBlock+int64(blockOffset)) if err != nil { return nil, err } @@ -119,7 +122,7 @@ func readMetadata(r io.ReaderAt, c Compressor, firstBlock int64, initialBlockOff // do we have any more to read? 
for len(b) < size { blockOffset += int(read) - m, read, err = readMetaBlock(r, c, firstBlock+int64(blockOffset)) + m, read, err = fs.readMetaBlock(r, c, firstBlock+int64(blockOffset)) if err != nil { return nil, err } diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/squashfs.go b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/squashfs.go index 19f88f11278..bdd8d5a7eef 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/squashfs.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/squashfs.go @@ -17,6 +17,7 @@ const ( metadataBlockSize = 8 * KB minBlocksize = 4 * KB maxBlocksize = 1 * MB + defaultCacheSize = 128 * MB ) // FileSystem implements the FileSystem interface @@ -32,6 +33,7 @@ type FileSystem struct { uidsGids []uint32 xattrs *xAttrTable rootDir inode + cache *lru } // Equal compare if two filesystems are equal @@ -111,7 +113,38 @@ func Create(f util.File, size, start, blocksize int64) (*FileSystem, error) { // which allow you to work directly with partitions, rather than having to calculate (and hopefully not make any errors) // where a partition starts and ends. // -// If the provided blocksize is 0, it will use the default of 2K bytes +// If the provided blocksize is 0, it will use the default of 2K bytes. +// +// This will use a cache for the decompressed blocks of 128 MB by +// default. (You can set this with the SetCacheSize method and read +// its size with the GetCacheSize method). A block cache is essential +// for performance when reading. This implements a cache for the +// fragments (tail ends of files) and the metadata (directory +// listings) which otherwise would be read, decompressed and discarded +// many times. 
+// +// Unpacking a 3 GB squashfs made from the tensorflow docker image like this: +// +// docker export $(docker create tensorflow/tensorflow:latest-gpu-jupyter) -o tensorflow.tar.gz +// mkdir -p tensorflow && tar xf tensorflow.tar.gz -C tensorflow +// [ -f tensorflow.sqfs ] && rm tensorflow.sqfs +// mksquashfs tensorflow tensorflow.sqfs -comp zstd -Xcompression-level 3 -b 1M -no-xattrs -all-root +// +// Gives these timings with and without cache: +// +// - no caching: 206s +// - 256 MB cache: 16.7s +// - 128 MB cache: 17.5s (the default) +// - 64 MB cache: 23.4s +// - 32 MB cache: 54.s +// +// The cached versions compare favourably to the C program unsquashfs +// which takes 12.0s to unpack the same archive. +// +// These tests were done using rclone and the archive backend which +// uses this library like this: +// +// rclone -P --transfers 16 --checkers 16 copy :archive:/path/to/tensorflow.sqfs /tmp/tensorflow func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) { var ( read int @@ -147,7 +180,7 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) { // create the compressor function we will use compress, err := newCompressor(s.compression) if err != nil { - return nil, fmt.Errorf("unable to create compressor") + return nil, fmt.Errorf("unable to create compressor: %v", err) } // load fragments @@ -160,7 +193,7 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) { var ( xattrs *xAttrTable ) - if !s.noXattrs { + if !s.noXattrs && s.xattrTableStart != 0xffff_ffff_ffff_ffff { // xattr is right to the end of the disk xattrs, err = readXattrsTable(s, file, compress) if err != nil { @@ -180,11 +213,12 @@ func Read(file util.File, size, start, blocksize int64) (*FileSystem, error) { size: size, file: file, superblock: s, - blocksize: blocksize, + blocksize: int64(s.blocksize), // use the blocksize in the superblock xattrs: xattrs, compressor: compress, fragments: fragments, uidsGids: 
uidsgids, + cache: newLRU(int(defaultCacheSize) / int(s.blocksize)), } // for efficiency, read in the root inode right now rootInode, err := fs.getInode(s.rootInode.block, s.rootInode.offset, inodeBasicDirectory) @@ -200,6 +234,30 @@ func (fs *FileSystem) Type() filesystem.Type { return filesystem.TypeSquashfs } +// SetCacheSize set the maximum memory used by the block cache to cacheSize bytes. +// +// The default is 128 MB. +// +// If this is <= 0 then the cache will be disabled. +func (fs *FileSystem) SetCacheSize(cacheSize int) { + if fs.cache == nil { + return + } + blocks := cacheSize / int(fs.blocksize) + if blocks <= 0 { + blocks = 0 + } + fs.cache.setMaxBlocks(blocks) +} + +// GetCacheSize get the maximum memory used by the block cache in bytes. +func (fs *FileSystem) GetCacheSize() int { + if fs.cache == nil { + return 0 + } + return fs.cache.maxBlocks * int(fs.blocksize) +} + // Mkdir make a directory at the given path. It is equivalent to `mkdir -p`, i.e. idempotent, in that: // // * It will make the entire tree path if it does not exist @@ -307,30 +365,9 @@ func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { if targetEntry == nil { return nil, fmt.Errorf("target file %s does not exist", p) } - // get the inode data for this file - // now open the file - // get the inode for the file - var eFile *extendedFile - in := targetEntry.inode - iType := in.inodeType() - body := in.getBody() - //nolint:exhaustive // all other cases fall under default - switch iType { - case inodeBasicFile: - extFile := body.(*basicFile).toExtended() - eFile = &extFile - case inodeExtendedFile: - eFile, _ = body.(*extendedFile) - default: - return nil, fmt.Errorf("inode is of type %d, neither basic nor extended directory", iType) - } - - f = &File{ - extendedFile: eFile, - isReadWrite: false, - isAppend: false, - offset: 0, - filesystem: fs, + f, err = targetEntry.Open() + if err != nil { + return nil, err } } else { f, err = 
os.OpenFile(path.Join(fs.workspace, p), flag, 0o644) @@ -440,17 +477,16 @@ func (fs *FileSystem) hydrateDirectoryEntries(entries []*directoryEntryRaw) ([]* } } fullEntries = append(fullEntries, &directoryEntry{ + fs: fs, isSubdirectory: e.isSubdirectory, name: e.name, size: body.size(), modTime: header.modTime, mode: header.mode, inode: in, - sys: FileStat{ - uid: fs.uidsGids[header.uidIdx], - gid: fs.uidsGids[header.gidIdx], - xattrs: xattrs, - }, + uid: fs.uidsGids[header.uidIdx], + gid: fs.uidsGids[header.gidIdx], + xattrs: xattrs, }) } return fullEntries, nil @@ -464,7 +500,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod // get the block // start by getting the minimum for the proposed type. It very well might be wrong. size := inodeTypeToSize(iType) - uncompressed, err := readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) + uncompressed, err := fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) if err != nil { return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err) } @@ -475,6 +511,14 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod } if header.inodeType != iType { iType = header.inodeType + size = inodeTypeToSize(iType) + // Read more data if necessary (quite rare) + if size > len(uncompressed) { + uncompressed, err = fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) + if err != nil { + return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err) + } + } } // now read the body, which may have a variable size body, extra, err := parseInodeBody(uncompressed[inodeHeaderSize:], int(fs.blocksize), iType) @@ -484,7 +528,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod // if it returns extra > 0, then it needs that many more bytes to be 
read, and to be reparsed if extra > 0 { size += extra - uncompressed, err = readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) + uncompressed, err = fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.inodeTableStart), blockOffset, byteOffset, size) if err != nil { return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err) } @@ -504,7 +548,7 @@ func (fs *FileSystem) getInode(blockOffset uint32, byteOffset uint16, iType inod // block when uncompressed. func (fs *FileSystem) getDirectory(blockOffset uint32, byteOffset uint16, size int) (*directory, error) { // get the block - uncompressed, err := readMetadata(fs.file, fs.compressor, int64(fs.superblock.directoryTableStart), blockOffset, byteOffset, size) + uncompressed, err := fs.readMetadata(fs.file, fs.compressor, int64(fs.superblock.directoryTableStart), blockOffset, byteOffset, size) if err != nil { return nil, fmt.Errorf("error reading block at position %d: %v", blockOffset, err) } @@ -517,6 +561,10 @@ func (fs *FileSystem) getDirectory(blockOffset uint32, byteOffset uint16, size i } func (fs *FileSystem) readBlock(location int64, compressed bool, size uint32) ([]byte, error) { + // Zero size is a sparse block of blocksize + if size == 0 { + return make([]byte, fs.superblock.blocksize), nil + } b := make([]byte, size) read, err := fs.file.ReadAt(b, location) if err != nil && err != io.EOF { @@ -543,25 +591,32 @@ func (fs *FileSystem) readFragment(index, offset uint32, fragmentSize int64) ([] return nil, fmt.Errorf("cannot find fragment block with index %d", index) } fragmentInfo := fs.fragments[index] - // figure out the size of the compressed block and if it is compressed - b := make([]byte, fragmentInfo.size) - read, err := fs.file.ReadAt(b, int64(fragmentInfo.start)) - if err != nil && err != io.EOF { - return nil, fmt.Errorf("unable to read fragment block %d: %v", index, err) - } - if read != len(b) { - return nil, 
fmt.Errorf("read %d instead of expected %d bytes for fragment block %d", read, len(b), index) - } - - data := b - if fragmentInfo.compressed { - if fs.compressor == nil { - return nil, fmt.Errorf("fragment compressed but do not have valid compressor") + pos := int64(fragmentInfo.start) + data, _, err := fs.cache.get(pos, func() (data []byte, size uint16, err error) { + // figure out the size of the compressed block and if it is compressed + b := make([]byte, fragmentInfo.size) + read, err := fs.file.ReadAt(b, pos) + if err != nil && err != io.EOF { + return nil, 0, fmt.Errorf("unable to read fragment block %d: %v", index, err) } - data, err = fs.compressor.decompress(b) - if err != nil { - return nil, fmt.Errorf("decompress error: %v", err) + if read != len(b) { + return nil, 0, fmt.Errorf("read %d instead of expected %d bytes for fragment block %d", read, len(b), index) } + + data = b + if fragmentInfo.compressed { + if fs.compressor == nil { + return nil, 0, fmt.Errorf("fragment compressed but do not have valid compressor") + } + data, err = fs.compressor.decompress(b) + if err != nil { + return nil, 0, fmt.Errorf("decompress error: %v", err) + } + } + return data, 0, nil + }) + if err != nil { + return nil, err } // now get the data from the offset return data[offset : int64(offset)+fragmentSize], nil @@ -604,8 +659,9 @@ func readFragmentTable(s *superblock, file util.File, c Compressor) ([]*fragment // load in the actual fragment entries // read each block and uncompress it var fragmentTable []*fragmentEntry + var fs = &FileSystem{} for i, offset := range offsets { - uncompressed, _, err := readMetaBlock(file, c, offset) + uncompressed, _, err := fs.readMetaBlock(file, c, offset) if err != nil { return nil, fmt.Errorf("error reading meta block %d at position %d: %v", i, offset, err) } @@ -674,13 +730,14 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable, var ( uncompressed []byte size uint16 + fs = &FileSystem{} ) bIndex := 
make([]byte, 0) // convert those into indexes for i := 0; i+8-1 < len(b); i += 8 { locn := binary.LittleEndian.Uint64(b[i : i+8]) - uncompressed, _, err = readMetaBlock(file, c, int64(locn)) + uncompressed, _, err = fs.readMetaBlock(file, c, int64(locn)) if err != nil { return nil, fmt.Errorf("error reading xattr index meta block %d at position %d: %v", i, locn, err) } @@ -691,7 +748,7 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable, xAttrEnd := binary.LittleEndian.Uint64(b[:8]) xAttrData := make([]byte, 0) for i := xAttrStart; i < xAttrEnd; { - uncompressed, size, err = readMetaBlock(file, c, int64(i)) + uncompressed, size, err = fs.readMetaBlock(file, c, int64(i)) if err != nil { return nil, fmt.Errorf("error reading xattr data meta block at position %d: %v", i, err) } @@ -704,7 +761,7 @@ func readXattrsTable(s *superblock, file util.File, c Compressor) (*xAttrTable, return parseXattrsTable(xAttrData, bIndex, s.idTableStart, c) } -//nolint:unparam // this does not use offset or compressor yet, but only because we have not yet added support +//nolint:unparam,unused,revive // this does not use offset or compressor yet, but only because we have not yet added support func parseXattrsTable(bUIDXattr, bIndex []byte, offset uint64, c Compressor) (*xAttrTable, error) { // create the ID list var ( @@ -765,13 +822,14 @@ func readUidsGids(s *superblock, file util.File, c Compressor) ([]uint32, error) var ( uncompressed []byte + fs = &FileSystem{} ) data := make([]byte, 0) // convert those into indexes for i := 0; i+8-1 < len(b); i += 8 { locn := binary.LittleEndian.Uint64(b[i : i+8]) - uncompressed, _, err = readMetaBlock(file, c, int64(locn)) + uncompressed, _, err = fs.readMetaBlock(file, c, int64(locn)) if err != nil { return nil, fmt.Errorf("error reading uidgid index meta block %d at position %d: %v", i, locn, err) } diff --git a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/xattr.go 
b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/xattr.go index f63d4029a6c..e1894217d3d 100644 --- a/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/xattr.go +++ b/vendor/github.com/diskfs/go-diskfs/filesystem/squashfs/xattr.go @@ -45,7 +45,7 @@ func (x *xAttrTable) find(pos int) (map[string]string, error) { xattrs := map[string]string{} for i := 0; i < int(count); i++ { // must be 4 bytes for header - if len(b[pos:]) < 4 { + if len(b[ptr:]) < 4 { return nil, fmt.Errorf("insufficient bytes %d to read the xattr at position %d", len(b[ptr:]), ptr) } // get the type and size @@ -56,7 +56,7 @@ func (x *xAttrTable) find(pos int) (map[string]string, error) { valStart := valHeaderStart + 4 // make sure we have enough bytes if len(b[nameStart:]) < xSize { - return nil, fmt.Errorf("xattr header has size %d, but only %d bytes available to read at position %d", xSize, len(b[pos+4:]), ptr) + return nil, fmt.Errorf("xattr header has size %d, but only %d bytes available to read at position %d", xSize, len(b[ptr+4:]), ptr) } if xSize < 1 { return nil, fmt.Errorf("no name given for xattr at position %d", ptr) diff --git a/vendor/github.com/diskfs/go-diskfs/partition/gpt/partition.go b/vendor/github.com/diskfs/go-diskfs/partition/gpt/partition.go index a454ec23e94..bfa0e66270a 100644 --- a/vendor/github.com/diskfs/go-diskfs/partition/gpt/partition.go +++ b/vendor/github.com/diskfs/go-diskfs/partition/gpt/partition.go @@ -291,3 +291,14 @@ func (p *Partition) sectorSizes() (physical, logical int) { func (p *Partition) Equal(o *Partition) bool { return p != nil && o != nil && *p == *o } + +// UUID returns the partitions UUID +func (p *Partition) UUID() string { + return p.GUID +} + +// Expand increases the size of the partition by a number of sectors +func (p *Partition) Expand(sectors uint64) { + p.End += sectors + p.Size += sectors * uint64(p.logicalSectorSize) +} diff --git a/vendor/github.com/diskfs/go-diskfs/partition/gpt/table.go 
b/vendor/github.com/diskfs/go-diskfs/partition/gpt/table.go index bbea768dc17..132b3578e01 100644 --- a/vendor/github.com/diskfs/go-diskfs/partition/gpt/table.go +++ b/vendor/github.com/diskfs/go-diskfs/partition/gpt/table.go @@ -20,6 +20,7 @@ const ( // just defaults physicalSectorSize = 512 logicalSectorSize = 512 + gptHeaderSector = 1 ) // Table represents a partition table to be applied to a disk or read from a disk @@ -106,7 +107,7 @@ func (t *Table) initTable(size int64) { t.secondaryHeader = diskSectors - 1 } if t.lastDataSector == 0 { - t.lastDataSector = diskSectors - 1 - partSectors + t.lastDataSector = t.secondaryHeader - partSectors - 1 } t.initialized = true @@ -347,15 +348,9 @@ func readPartitionArrayBytes(b []byte, entrySize, logicalSectorSize, physicalSec return parts, nil } -// tableFromBytes read a partition table from a byte slice -func tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table, error) { - // minimum size - gpt entries + header + LBA0 for (protective) MBR - if len(b) < logicalBlockSize*2 { - return nil, fmt.Errorf("data for partition was %d bytes instead of expected minimum %d", len(b), logicalBlockSize*2) - } - - // GPT starts at LBA1 - gpt := b[logicalBlockSize:] +// readGPTHeader reads the GPT header from the given byte slice +func readGPTHeader(b []byte) (*Table, error) { + gpt := b // start with fixed headers efiSignature := gpt[0:8] efiRevision := gpt[8:12] @@ -396,12 +391,7 @@ func tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table, return nil, fmt.Errorf("invalid EFI Header Checksum, expected %v, got %v", checksum, efiHeaderCrc) } - // potential protective MBR is at LBA0 - hasProtectiveMBR := readProtectiveMBR(b[:logicalBlockSize], uint32(secondaryHeader)) - table := Table{ - LogicalSectorSize: logicalBlockSize, - PhysicalSectorSize: physicalBlockSize, partitionEntrySize: partitionEntrySize, primaryHeader: primaryHeader, secondaryHeader: secondaryHeader, @@ -409,15 +399,63 @@ func 
tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table, lastDataSector: lastDataSector, partitionArraySize: int(partitionEntryCount), partitionFirstLBA: partitionEntryFirstLBA, - ProtectiveMBR: hasProtectiveMBR, GUID: strings.ToUpper(diskGUID.String()), partitionEntryChecksum: partitionEntryChecksum, - initialized: true, } return &table, nil } +// tableHeaderFromBytes read a partition table from a byte slice, mainly used to validate the secondary header +func tableHeaderFromBytes(b []byte, logicalBlockSize, physicalBlockSize int, skipMBR bool) (*Table, error) { + // minimum size - gpt entries + header + LBA0 for (protective) MBR + minSize := logicalBlockSize + if len(b) < minSize { + return nil, fmt.Errorf("data for partition was %d bytes instead of expected minimum %d", len(b), minSize) + } + gpt := b + if skipMBR { + gpt = b[logicalBlockSize:] + } + + table, err := readGPTHeader(gpt) + if err != nil { + return nil, err + } + + // potential protective MBR is at LBA0 + table.ProtectiveMBR = readProtectiveMBR(b[:logicalBlockSize], uint32(table.secondaryHeader)) + table.LogicalSectorSize = logicalBlockSize + table.PhysicalSectorSize = physicalBlockSize + table.initialized = true + + return table, nil +} + +// tableFromBytes read a partition table from a byte slice +func tableFromBytes(b []byte, logicalBlockSize, physicalBlockSize int) (*Table, error) { + // minimum size - gpt entries + header + LBA0 for (protective) MBR + if len(b) < logicalBlockSize*2 { + return nil, fmt.Errorf("data for partition was %d bytes instead of expected minimum %d", len(b), logicalBlockSize*2) + } + + // GPT starts at LBA1 + gpt := b[logicalBlockSize:] + + table, err := readGPTHeader(gpt) + if err != nil { + return nil, err + } + + // potential protective MBR is at LBA0 + table.ProtectiveMBR = readProtectiveMBR(b[:logicalBlockSize], uint32(table.secondaryHeader)) + table.LogicalSectorSize = logicalBlockSize + table.PhysicalSectorSize = physicalBlockSize + 
table.initialized = true + + return table, nil +} + // Type report the type of table, always "gpt" func (t *Table) Type() string { return "gpt" @@ -550,3 +588,84 @@ func (t *Table) GetPartitions() []part.Partition { } return parts } + +// UUID returns the partition table UUID (disk UUID) +func (t *Table) UUID() string { + return t.GUID +} + +// Verify will attempt to evaluate the headers +func (t *Table) Verify(f util.File, diskSize uint64) error { + if t.LogicalSectorSize == 0 { + // Avoid divide by zero panic. + return fmt.Errorf("table is not initialized") + } + + // Determine the size of disk that GPT expects + expectedDiskSize := (t.secondaryHeader + 1) * uint64(t.LogicalSectorSize) + if diskSize != expectedDiskSize { + return fmt.Errorf("secondary Header is not at end of the disk, expected => %d / actual => %d", expectedDiskSize, diskSize) + } + b := make([]byte, t.LogicalSectorSize) + seekAddress := int64(t.secondaryHeader) * int64(t.LogicalSectorSize) + _, err := f.ReadAt(b, seekAddress) + if err != nil { + return fmt.Errorf("error reading GPT from file at %d / disksize %d : %v", seekAddress, diskSize, err) + } + secondaryTable, err := tableHeaderFromBytes(b, t.LogicalSectorSize, t.PhysicalSectorSize, false) + if err != nil { + return fmt.Errorf("error reading GPT from file at %d / disksize %d : %v", seekAddress, diskSize, err) + } + if t.firstDataSector != secondaryTable.firstDataSector { + return fmt.Errorf("error comparing GPT headers expected => %d / actual => %d", t.firstDataSector, secondaryTable.firstDataSector) + } + partSectors := uint64(t.partitionArraySize) * uint64(t.partitionEntrySize) / uint64(t.LogicalSectorSize) + lastDataSector := t.secondaryHeader - partSectors - 1 + if t.lastDataSector != lastDataSector { + return fmt.Errorf("error comparing GPT secondary headers expected => %d / actual => %d", t.lastDataSector, lastDataSector) + } + return nil +} + +// Repair will attempt to evaluate the headers fix the header location and re-write the 
primary and secondary header +func (t *Table) Repair(diskSize uint64) error { + if t.LogicalSectorSize == 0 { + // Avoid divide by zero panic. + return fmt.Errorf("table is not initialized") + } + + partSectors := uint64(t.partitionArraySize) * uint64(t.partitionEntrySize) / uint64(t.LogicalSectorSize) + + t.secondaryHeader = (diskSize / uint64(t.LogicalSectorSize)) - 1 + t.lastDataSector = t.secondaryHeader - partSectors - 1 + + return nil +} + +// TotalSize returns the total size of the GPT in bytes. +// +// This is counted from the start of the MBR to the end of the secondary +// header. +func (t *Table) TotalSize() uint64 { + return (t.secondaryHeader + gptHeaderSector) * uint64(t.LogicalSectorSize) +} + +func (t *Table) LastDataSector() uint64 { + return t.lastDataSector +} + +// Resize changes the size of the GPT. +// +// The size argument is in bytes and must be a multiple of the logical sector +// size. +// Use this function in case a storage device is not the same as the total +// size of its GPT. +func (t *Table) Resize(size uint64) { + // how many sectors on the disk? + diskSectors := size / uint64(t.LogicalSectorSize) + // how many sectors used for partition entries? 
+ partSectors := uint64(t.partitionArraySize) * uint64(t.partitionEntrySize) / uint64(t.LogicalSectorSize) + + t.secondaryHeader = diskSectors - 1 + t.lastDataSector = t.secondaryHeader - 1 - partSectors +} diff --git a/vendor/github.com/diskfs/go-diskfs/partition/mbr/partition.go b/vendor/github.com/diskfs/go-diskfs/partition/mbr/partition.go index 4afab694780..a63428da9b0 100644 --- a/vendor/github.com/diskfs/go-diskfs/partition/mbr/partition.go +++ b/vendor/github.com/diskfs/go-diskfs/partition/mbr/partition.go @@ -27,6 +27,8 @@ type Partition struct { // we need this for calculations logicalSectorSize int physicalSectorSize int + // partitionUUID is set when retrieving partitions from a Table + partitionUUID string } // PartitionEqualBytes compares if the bytes for 2 partitions are equal, ignoring CHS start and end @@ -46,7 +48,7 @@ func PartitionEqualBytes(b1, b2 []byte) bool { bytes.Equal(b1[12:16], b2[12:16]) } -// Equal compares if another partition is equal to this one, ignoring CHS start and end +// Equal compares if another partition is equal to this one, ignoring the UUID and CHS start and end func (p *Partition) Equal(p2 *Partition) bool { if p2 == nil { return false @@ -204,3 +206,9 @@ func (p *Partition) sectorSizes() (physical, logical int) { } return physical, logical } + +// UUID returns the partitions UUID. For MBR based partition tables this is the +// partition table UUID with the partition number as a suffix. 
+func (p *Partition) UUID() string { + return p.partitionUUID +} diff --git a/vendor/github.com/diskfs/go-diskfs/partition/mbr/table.go b/vendor/github.com/diskfs/go-diskfs/partition/mbr/table.go index 631018a3b00..c64dab5f0f9 100644 --- a/vendor/github.com/diskfs/go-diskfs/partition/mbr/table.go +++ b/vendor/github.com/diskfs/go-diskfs/partition/mbr/table.go @@ -2,6 +2,7 @@ package mbr import ( "bytes" + "encoding/binary" "fmt" "github.com/diskfs/go-diskfs/partition/part" @@ -13,7 +14,7 @@ type Table struct { Partitions []*Partition LogicalSectorSize int // logical size of a sector PhysicalSectorSize int // physical size of the sector - initialized bool + partitionTableUUID string } const ( @@ -23,6 +24,9 @@ const ( partitionEntriesStart = 446 partitionEntriesCount = 4 signatureStart = 510 + // the partition table UUID is stored in 4 bytes in the MBR + partitionTableUUIDStart = 440 + partitionTableUUIDEnd = 444 ) // partitionEntrySize standard size of an MBR partition @@ -54,20 +58,7 @@ func comparePartitionArray(p1, p2 []*Partition) bool { return matches } -// ensure that a blank table is initialized -func (t *Table) initTable() { - // default settings - if t.LogicalSectorSize == 0 { - t.LogicalSectorSize = 512 - } - if t.PhysicalSectorSize == 0 { - t.PhysicalSectorSize = 512 - } - - t.initialized = true -} - -// Equal check if another table is equal to this one, ignoring CHS start and end for the partitions +// Equal check if another table is equal to this one, ignoring the partition table UUID and CHS start and end for the partitions func (t *Table) Equal(t2 *Table) bool { if t2 == nil { return false @@ -85,13 +76,14 @@ func tableFromBytes(b []byte) (*Table, error) { if len(b) != mbrSize { return nil, fmt.Errorf("data for partition was %d bytes instead of expected %d", len(b), mbrSize) } - mbrSignature := b[signatureStart:] // validate signature + mbrSignature := b[signatureStart:] if !bytes.Equal(mbrSignature, getMbrSignature()) { return nil, 
fmt.Errorf("invalid MBR Signature %v", mbrSignature) } + ptUUID := readPartitionTableUUID(b) parts := make([]*Partition, 0, partitionEntriesCount) count := int(partitionEntriesCount) for i := 0; i < count; i++ { @@ -102,6 +94,7 @@ func tableFromBytes(b []byte) (*Table, error) { if err != nil { return nil, fmt.Errorf("error reading partition entry %d: %v", i, err) } + p.partitionUUID = formatPartitionUUID(ptUUID, i+1) parts = append(parts, p) } @@ -109,17 +102,38 @@ func tableFromBytes(b []byte) (*Table, error) { Partitions: parts, LogicalSectorSize: logicalSectorSize, PhysicalSectorSize: 512, + partitionTableUUID: ptUUID, } return table, nil } +func readPartitionTableUUID(b []byte) string { + ptUUID := b[partitionTableUUIDStart:partitionTableUUIDEnd] + return fmt.Sprintf("%x", binary.LittleEndian.Uint32(ptUUID)) +} + +// UUID returns the partition table UUID used to identify disks +func (t *Table) UUID() string { + return t.partitionTableUUID +} + +// formatPartitionUUID creates the partition UUID which is created by using the +// partition table UUID and the partition index. +// Format string taken from libblkid: +// https://github.com/util-linux/util-linux/blob/master/libblkid/src/partitions/partitions.c#L1387C42-L1387C52 +func formatPartitionUUID(ptUUID string, index int) string { + return fmt.Sprintf("%.33s-%02x", ptUUID, index) +} + // Type report the type of table, always the string "mbr" func (t *Table) Type() string { return "mbr" } // Read read a partition table from a disk, given the logical block size and physical block size +// +//nolint:unused,revive // not used in MBR, but it is important to implement the interface func Read(f util.File, logicalBlockSize, physicalBlockSize int) (*Table, error) { // read the data off of the disk b := make([]byte, mbrSize) @@ -155,6 +169,8 @@ func (t *Table) toBytes() []byte { // Write writes a given MBR Table to disk. 
// Must be passed the util.File to write to and the size of the disk +// +//nolint:unused,revive // not used in MBR, but it is important to implement the interface func (t *Table) Write(f util.File, size int64) error { b := t.toBytes() @@ -176,3 +192,17 @@ func (t *Table) GetPartitions() []part.Partition { } return parts } + +// Verify will attempt to evaluate the headers +// +//nolint:unused,revive // not used in MBR, but it is important to implement the interface +func (t *Table) Verify(f util.File, diskSize uint64) error { + return nil +} + +// Repair will attempt to repair a broken Master Boot Record +// +//nolint:unused,revive // not used in MBR, but it is important to implement the interface +func (t *Table) Repair(diskSize uint64) error { + return nil +} diff --git a/vendor/github.com/diskfs/go-diskfs/partition/part/partition.go b/vendor/github.com/diskfs/go-diskfs/partition/part/partition.go index 2a9a3dbec60..f87ba9faa02 100644 --- a/vendor/github.com/diskfs/go-diskfs/partition/part/partition.go +++ b/vendor/github.com/diskfs/go-diskfs/partition/part/partition.go @@ -12,4 +12,5 @@ type Partition interface { GetStart() int64 ReadContents(util.File, io.Writer) (int64, error) WriteContents(util.File, io.Reader) (uint64, error) + UUID() string } diff --git a/vendor/github.com/diskfs/go-diskfs/partition/table.go b/vendor/github.com/diskfs/go-diskfs/partition/table.go index 29cf0046593..62f56503a4d 100644 --- a/vendor/github.com/diskfs/go-diskfs/partition/table.go +++ b/vendor/github.com/diskfs/go-diskfs/partition/table.go @@ -10,4 +10,7 @@ type Table interface { Type() string Write(util.File, int64) error GetPartitions() []part.Partition + Repair(diskSize uint64) error + Verify(f util.File, diskSize uint64) error + UUID() string } diff --git a/vendor/github.com/diskfs/go-diskfs/util/bitmap.go b/vendor/github.com/diskfs/go-diskfs/util/bitmap.go new file mode 100644 index 00000000000..6fb85a1ba77 --- /dev/null +++ 
b/vendor/github.com/diskfs/go-diskfs/util/bitmap.go @@ -0,0 +1,171 @@ +package util + +import "fmt" + +// Bitmap is a structure holding a bitmap +type Bitmap struct { + bits []byte +} + +// Contiguous a position and count of contiguous bits, either free or set +type Contiguous struct { + Position int + Count int +} + +// BitmapFromBytes create a bitmap struct from bytes +func BitmapFromBytes(b []byte) *Bitmap { + // just copy them over + bits := make([]byte, len(b)) + copy(bits, b) + bm := Bitmap{ + bits: bits, + } + + return &bm +} + +// NewBitmap creates a new bitmap of size bytes; it is not in bits to force the caller to have +// a complete set +func NewBitmap(bytes int) *Bitmap { + bm := Bitmap{ + bits: make([]byte, bytes), + } + return &bm +} + +// ToBytes returns raw bytes underlying the bitmap +func (bm *Bitmap) ToBytes() []byte { + b := make([]byte, len(bm.bits)) + copy(b, bm.bits) + + return b +} + +// FromBytes overwrite the existing map with the contents of the bytes. +// It is the equivalent of BitmapFromBytes, but uses an existing Bitmap. 
+func (bm *Bitmap) FromBytes(b []byte) { + bm.bits = make([]byte, len(b)) + copy(bm.bits, b) +} + +// IsSet check if a specific bit location is set +func (bm *Bitmap) IsSet(location int) (bool, error) { + byteNumber, bitNumber := findBitForIndex(location) + if byteNumber > len(bm.bits) { + return false, fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8) + } + mask := byte(0x1) << bitNumber + return bm.bits[byteNumber]&mask == mask, nil +} + +// Clear a specific bit location +func (bm *Bitmap) Clear(location int) error { + byteNumber, bitNumber := findBitForIndex(location) + if byteNumber > len(bm.bits) { + return fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8) + } + mask := byte(0x1) << bitNumber + mask = ^mask + bm.bits[byteNumber] &= mask + return nil +} + +// Set a specific bit location +func (bm *Bitmap) Set(location int) error { + byteNumber, bitNumber := findBitForIndex(location) + if byteNumber > len(bm.bits) { + return fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8) + } + mask := byte(0x1) << bitNumber + bm.bits[byteNumber] |= mask + return nil +} + +// FirstFree returns the first free bit in the bitmap +// Begins at start, so if you want to find the first free bit, pass start=1. +// Returns -1 if none found. 
+func (bm *Bitmap) FirstFree(start int) int { + var location = -1 + candidates := bm.bits[start/8:] + for i, b := range candidates { + // if all used, continue to next byte + if b&0xff == 0xff { + continue + } + // not all used, so find first bit set to 0 + for j := uint8(0); j < 8; j++ { + mask := byte(0x1) << j + if b&mask != mask { + location = 8*i + int(j) + break + } + } + break + } + return location +} + +// FirstSet returns location of first set bit in the bitmap +func (bm *Bitmap) FirstSet() int { + var location = -1 + for i, b := range bm.bits { + // if all free, continue to next + if b == 0x00 { + continue + } + // not all free, so find first bit set to 1 + for j := uint8(0); j < 8; j++ { + mask := byte(0x1) << j + mask = ^mask + if b|mask != mask { + location = 8*i + (8 - int(j)) + break + } + } + break + } + return location +} + +// FreeList returns a slicelist of contiguous free locations by location. +// It is sorted by location. If you want to sort it by size, uses sort.Slice +// for example, if the bitmap is 10010010 00100000 10000010, it will return +// +// 1: 2, // 2 free bits at position 1 +// 4: 2, // 2 free bits at position 4 +// 8: 3, // 3 free bits at position 8 +// 11: 5 // 5 free bits at position 11 +// 17: 5 // 5 free bits at position 17 +// 23: 1, // 1 free bit at position 23 +// +// if you want it in reverse order, just reverse the slice. 
+func (bm *Bitmap) FreeList() []Contiguous { + var list []Contiguous + var location = -1 + var count = 0 + for i, b := range bm.bits { + for j := uint8(0); j < 8; j++ { + mask := byte(0x1) << j + switch { + case b&mask != mask: + if location == -1 { + location = 8*i + int(j) + } + count++ + case location != -1: + list = append(list, Contiguous{location, count}) + location = -1 + count = 0 + } + } + } + if location != -1 { + list = append(list, Contiguous{location, count}) + } + return list +} + +func findBitForIndex(index int) (byteNumber int, bitNumber uint8) { + return index / 8, uint8(index % 8) +} diff --git a/vendor/github.com/diskfs/go-diskfs/util/uniqify.go b/vendor/github.com/diskfs/go-diskfs/util/uniqify.go new file mode 100644 index 00000000000..c091a6ecb8e --- /dev/null +++ b/vendor/github.com/diskfs/go-diskfs/util/uniqify.go @@ -0,0 +1,13 @@ +package util + +func Uniqify[T comparable](s []T) []T { + m := make(map[T]bool) + for _, v := range s { + m[v] = true + } + var result = make([]T, 0, len(m)) + for k := range m { + result = append(result, k) + } + return result +} diff --git a/vendor/gopkg.in/djherbis/times.v1/LICENSE b/vendor/github.com/djherbis/times/LICENSE similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/LICENSE rename to vendor/github.com/djherbis/times/LICENSE diff --git a/vendor/gopkg.in/djherbis/times.v1/README.md b/vendor/github.com/djherbis/times/README.md similarity index 82% rename from vendor/gopkg.in/djherbis/times.v1/README.md rename to vendor/github.com/djherbis/times/README.md index b68a132bf33..7c88890489c 100644 --- a/vendor/gopkg.in/djherbis/times.v1/README.md +++ b/vendor/github.com/djherbis/times/README.md @@ -4,7 +4,7 @@ times [![GoDoc](https://godoc.org/github.com/djherbis/times?status.svg)](https://godoc.org/github.com/djherbis/times) [![Release](https://img.shields.io/github/release/djherbis/times.svg)](https://github.com/djherbis/times/releases/latest) [![Software 
License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.txt) -[![Build Status](https://travis-ci.org/djherbis/times.svg?branch=master)](https://travis-ci.org/djherbis/times) +[![go test](https://github.com/djherbis/times/actions/workflows/go-test.yml/badge.svg)](https://github.com/djherbis/times/actions/workflows/go-test.yml) [![Coverage Status](https://coveralls.io/repos/djherbis/times/badge.svg?branch=master)](https://coveralls.io/r/djherbis/times?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/djherbis/times)](https://goreportcard.com/report/github.com/djherbis/times) [![Sourcegraph](https://sourcegraph.com/github.com/djherbis/times/-/badge.svg)](https://sourcegraph.com/github.com/djherbis/times?badge) @@ -21,7 +21,7 @@ package main import ( "log" - "gopkg.in/djherbis/times.v1" + "github.com/djherbis/times" ) func main() { @@ -50,8 +50,11 @@ Supported Times | atime | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | mtime | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | ctime | ✓* | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ | -| btime | ✓ | | | | | ✓ | ✓| ✓ | | | +| btime | ✓ | ✓* | | | | ✓ | ✓| ✓ | | | +* Linux btime requires kernel 4.11 and filesystem support, so HasBirthTime = false. +Use Timespec.HasBirthTime() to check if file has birth time. +Get(FileInfo) never returns btime. * Windows XP does not have ChangeTime so HasChangeTime = false, however Vista onward does have ChangeTime so Timespec.HasChangeTime() will only return false on those platforms when the syscall used to obtain them fails. @@ -60,5 +63,5 @@ only return false on those platforms when the syscall used to obtain them fails. 
Installation ------------ ```sh -go get gopkg.in/djherbis/times.v1 +go get -u github.com/djherbis/times ``` diff --git a/vendor/gopkg.in/djherbis/times.v1/ctime_windows.go b/vendor/github.com/djherbis/times/ctime_windows.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/ctime_windows.go rename to vendor/github.com/djherbis/times/ctime_windows.go diff --git a/vendor/gopkg.in/djherbis/times.v1/js.cover.dockerfile b/vendor/github.com/djherbis/times/js.cover.dockerfile similarity index 75% rename from vendor/gopkg.in/djherbis/times.v1/js.cover.dockerfile rename to vendor/github.com/djherbis/times/js.cover.dockerfile index 1f52edccd4a..f522a5e4f82 100644 --- a/vendor/gopkg.in/djherbis/times.v1/js.cover.dockerfile +++ b/vendor/github.com/djherbis/times/js.cover.dockerfile @@ -1,6 +1,6 @@ -FROM golang:1.16 +FROM golang:1.17 -RUN curl -sL https://deb.nodesource.com/setup_8.x | bash +RUN curl -sL https://deb.nodesource.com/setup_17.x | bash RUN apt-get install --yes nodejs WORKDIR /go/src/github.com/djherbis/times diff --git a/vendor/gopkg.in/djherbis/times.v1/js.cover.sh b/vendor/github.com/djherbis/times/js.cover.sh similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/js.cover.sh rename to vendor/github.com/djherbis/times/js.cover.sh diff --git a/vendor/github.com/djherbis/times/linux.cover.dockerfile b/vendor/github.com/djherbis/times/linux.cover.dockerfile new file mode 100644 index 00000000000..e1a795927c9 --- /dev/null +++ b/vendor/github.com/djherbis/times/linux.cover.dockerfile @@ -0,0 +1,6 @@ +FROM golang:1.17 + +WORKDIR /go/src/github.com/djherbis/times +COPY . . 
+ +RUN GO111MODULE=auto go test -covermode=count -coverprofile=profile.cov diff --git a/vendor/github.com/djherbis/times/linux.cover.sh b/vendor/github.com/djherbis/times/linux.cover.sh new file mode 100644 index 00000000000..83f97431b06 --- /dev/null +++ b/vendor/github.com/djherbis/times/linux.cover.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +docker build -f linux.cover.dockerfile -t linux.cover.djherbis.times . +docker create --name linux.cover.djherbis.times linux.cover.djherbis.times +docker cp linux.cover.djherbis.times:/go/src/github.com/djherbis/times/profile.cov . +docker rm -v linux.cover.djherbis.times \ No newline at end of file diff --git a/vendor/gopkg.in/djherbis/times.v1/times.go b/vendor/github.com/djherbis/times/times.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times.go rename to vendor/github.com/djherbis/times/times.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_aix.go b/vendor/github.com/djherbis/times/times_aix.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_aix.go rename to vendor/github.com/djherbis/times/times_aix.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_darwin.go b/vendor/github.com/djherbis/times/times_darwin.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_darwin.go rename to vendor/github.com/djherbis/times/times_darwin.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_dragonfly.go b/vendor/github.com/djherbis/times/times_dragonfly.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_dragonfly.go rename to vendor/github.com/djherbis/times/times_dragonfly.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_freebsd.go b/vendor/github.com/djherbis/times/times_freebsd.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_freebsd.go rename to vendor/github.com/djherbis/times/times_freebsd.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_js.go 
b/vendor/github.com/djherbis/times/times_js.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_js.go rename to vendor/github.com/djherbis/times/times_js.go diff --git a/vendor/github.com/djherbis/times/times_linux.go b/vendor/github.com/djherbis/times/times_linux.go new file mode 100644 index 00000000000..85f87dba3c8 --- /dev/null +++ b/vendor/github.com/djherbis/times/times_linux.go @@ -0,0 +1,185 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// http://golang.org/src/os/stat_linux.go + +package times + +import ( + "errors" + "os" + "sync/atomic" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// HasChangeTime and HasBirthTime are true if and only if +// the target OS supports them. +const ( + HasChangeTime = true + HasBirthTime = false +) + +type timespec struct { + atime + mtime + ctime + nobtime +} + +type timespecBtime struct { + atime + mtime + ctime + btime +} + +var ( + supportsStatx int32 = 1 + statxFunc = unix.Statx +) + +func isStatXSupported() bool { + return atomic.LoadInt32(&supportsStatx) == 1 +} + +func isStatXUnsupported(err error) bool { + // linux 4.10 and earlier does not support Statx syscall + if err != nil && errors.Is(err, unix.ENOSYS) { + atomic.StoreInt32(&supportsStatx, 0) + return true + } + return false +} + +// Stat returns the Timespec for the given filename. +func Stat(name string) (Timespec, error) { + if isStatXSupported() { + ts, err := statX(name) + if err == nil { + return ts, nil + } + if !isStatXUnsupported(err) { + return nil, err + } + // Fallback. 
+ } + return stat(name, os.Stat) +} + +func statX(name string) (Timespec, error) { + // https://man7.org/linux/man-pages/man2/statx.2.html + var statx unix.Statx_t + err := statxFunc(unix.AT_FDCWD, name, unix.AT_STATX_SYNC_AS_STAT, unix.STATX_ATIME|unix.STATX_MTIME|unix.STATX_CTIME|unix.STATX_BTIME, &statx) + if err != nil { + return nil, err + } + return extractTimes(&statx), nil +} + +// Lstat returns the Timespec for the given filename, and does not follow Symlinks. +func Lstat(name string) (Timespec, error) { + if isStatXSupported() { + ts, err := lstatx(name) + if err == nil { + return ts, nil + } + if !isStatXUnsupported(err) { + return nil, err + } + // Fallback. + } + return stat(name, os.Lstat) +} + +func lstatx(name string) (Timespec, error) { + // https://man7.org/linux/man-pages/man2/statx.2.html + var statX unix.Statx_t + err := statxFunc(unix.AT_FDCWD, name, unix.AT_STATX_SYNC_AS_STAT|unix.AT_SYMLINK_NOFOLLOW, unix.STATX_ATIME|unix.STATX_MTIME|unix.STATX_CTIME|unix.STATX_BTIME, &statX) + if err != nil { + return nil, err + } + return extractTimes(&statX), nil +} + +func statXFile(file *os.File) (Timespec, error) { + sc, err := file.SyscallConn() + if err != nil { + return nil, err + } + + var statx unix.Statx_t + var statxErr error + err = sc.Control(func(fd uintptr) { + // https://man7.org/linux/man-pages/man2/statx.2.html + statxErr = statxFunc(int(fd), "", unix.AT_EMPTY_PATH|unix.AT_STATX_SYNC_AS_STAT, unix.STATX_ATIME|unix.STATX_MTIME|unix.STATX_CTIME|unix.STATX_BTIME, &statx) + }) + if err != nil { + return nil, err + } + + if statxErr != nil { + return nil, statxErr + } + + return extractTimes(&statx), nil +} + +// StatFile returns the Timespec for the given *os.File. +func StatFile(file *os.File) (Timespec, error) { + if isStatXSupported() { + ts, err := statXFile(file) + if err == nil { + return ts, nil + } + if !isStatXUnsupported(err) { + return nil, err + } + // Fallback. 
+ } + return statFile(file) +} + +func statFile(file *os.File) (Timespec, error) { + fi, err := file.Stat() + if err != nil { + return nil, err + } + return getTimespec(fi), nil +} + +func statxTimestampToTime(ts unix.StatxTimestamp) time.Time { + return time.Unix(ts.Sec, int64(ts.Nsec)) +} + +func extractTimes(statx *unix.Statx_t) Timespec { + if statx.Mask&unix.STATX_BTIME == unix.STATX_BTIME { + var t timespecBtime + t.atime.v = statxTimestampToTime(statx.Atime) + t.mtime.v = statxTimestampToTime(statx.Mtime) + t.ctime.v = statxTimestampToTime(statx.Ctime) + t.btime.v = statxTimestampToTime(statx.Btime) + return t + } + + var t timespec + t.atime.v = statxTimestampToTime(statx.Atime) + t.mtime.v = statxTimestampToTime(statx.Mtime) + t.ctime.v = statxTimestampToTime(statx.Ctime) + return t +} + +func timespecToTime(ts syscall.Timespec) time.Time { + return time.Unix(int64(ts.Sec), int64(ts.Nsec)) +} + +func getTimespec(fi os.FileInfo) (t timespec) { + stat := fi.Sys().(*syscall.Stat_t) + t.atime.v = timespecToTime(stat.Atim) + t.mtime.v = timespecToTime(stat.Mtim) + t.ctime.v = timespecToTime(stat.Ctim) + return t +} diff --git a/vendor/gopkg.in/djherbis/times.v1/times_nacl.go b/vendor/github.com/djherbis/times/times_nacl.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_nacl.go rename to vendor/github.com/djherbis/times/times_nacl.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_netbsd.go b/vendor/github.com/djherbis/times/times_netbsd.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_netbsd.go rename to vendor/github.com/djherbis/times/times_netbsd.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_openbsd.go b/vendor/github.com/djherbis/times/times_openbsd.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_openbsd.go rename to vendor/github.com/djherbis/times/times_openbsd.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_plan9.go 
b/vendor/github.com/djherbis/times/times_plan9.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_plan9.go rename to vendor/github.com/djherbis/times/times_plan9.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_solaris.go b/vendor/github.com/djherbis/times/times_solaris.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_solaris.go rename to vendor/github.com/djherbis/times/times_solaris.go diff --git a/vendor/gopkg.in/djherbis/times.v1/times_linux.go b/vendor/github.com/djherbis/times/times_wasip1.go similarity index 60% rename from vendor/gopkg.in/djherbis/times.v1/times_linux.go rename to vendor/github.com/djherbis/times/times_wasip1.go index d9eb6976e2c..5463a8f3c65 100644 --- a/vendor/gopkg.in/djherbis/times.v1/times_linux.go +++ b/vendor/github.com/djherbis/times/times_wasip1.go @@ -2,7 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// http://golang.org/src/os/stat_linux.go +// https://github.com/golang/go/blob/master/src/os/stat_wasip1.go + +//go:build wasip1 +// +build wasip1 package times @@ -26,14 +29,14 @@ type timespec struct { nobtime } -func timespecToTime(ts syscall.Timespec) time.Time { - return time.Unix(int64(ts.Sec), int64(ts.Nsec)) +func timespecToTime(sec, nsec int64) time.Time { + return time.Unix(sec, nsec) } func getTimespec(fi os.FileInfo) (t timespec) { stat := fi.Sys().(*syscall.Stat_t) - t.atime.v = timespecToTime(stat.Atim) - t.mtime.v = timespecToTime(stat.Mtim) - t.ctime.v = timespecToTime(stat.Ctim) + t.atime.v = timespecToTime(int64(stat.Atime), 0) + t.mtime.v = timespecToTime(int64(stat.Mtime), 0) + t.ctime.v = timespecToTime(int64(stat.Ctime), 0) return t } diff --git a/vendor/gopkg.in/djherbis/times.v1/times_windows.go b/vendor/github.com/djherbis/times/times_windows.go similarity index 100% rename from vendor/gopkg.in/djherbis/times.v1/times_windows.go rename to 
vendor/github.com/djherbis/times/times_windows.go diff --git a/vendor/gopkg.in/djherbis/times.v1/use_generic_stat.go b/vendor/github.com/djherbis/times/use_generic_stat.go similarity index 58% rename from vendor/gopkg.in/djherbis/times.v1/use_generic_stat.go rename to vendor/github.com/djherbis/times/use_generic_stat.go index bc51560a8c4..0040aa9dc51 100644 --- a/vendor/gopkg.in/djherbis/times.v1/use_generic_stat.go +++ b/vendor/github.com/djherbis/times/use_generic_stat.go @@ -1,4 +1,4 @@ -// +build !windows +// +build !windows,!linux package times @@ -13,3 +13,12 @@ func Stat(name string) (Timespec, error) { func Lstat(name string) (Timespec, error) { return stat(name, os.Lstat) } + +// StatFile returns the Timespec for the given *os.File. +func StatFile(file *os.File) (Timespec, error) { + fi, err := file.Stat() + if err != nil { + return nil, err + } + return getTimespec(fi), nil +} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 00000000000..402433593c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 00000000000..d31b3781527 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 00000000000..4528059ca68 
--- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,123 @@ +version: 2 + +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + version_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000000..87d55747778 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ 
+Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 00000000000..244ee19c4bf --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,671 @@ +# compress + +This package provides various compression algorithms. 
+ +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. + +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# package usage + +Use `go get github.com/klauspost/compress@latest` to add it to your project. + +This package will support the current Go version and 2 versions back. + +* Use the `nounsafe` tag to disable all use of the "unsafe" package. +* Use the `noasm` tag to disable all assembly across packages. + +Use the links above for more information on each. 
+ +# changelog + +* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) + * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 + * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 + * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043 + * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045 + * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048 + * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 + * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 + +* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) + * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 + * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 + * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011 + * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013 + +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. 
https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on 
custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. 
See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 
2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
+ +
+ See changes to v1.15.x + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 
https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream 
recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. 
https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. 
+ +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to testing when upgrading. + +
+ +
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. 
[#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. 
[#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. 
[#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. 
[#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. 
[#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. 
+* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. 
[#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). +* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). 
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. 
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. +* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+ +# deflate usage + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +Typical speed is about 2x of the standard library packages. + +| old import | new import | Documentation | +|------------------|---------------------------------------|-------------------------------------------------------------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) | +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | + +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. 
+If you expect to have a lot of concurrently allocated Writers consider using +the stateless compress described below. + +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + +To disable all assembly add `-tags=noasm`. This works across all packages. + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. +It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, +but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +```go + // replace 'ioutil.Discard' with your output. + gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) + if err != nil { + return err + } + defer gzw.Close() + + w := bufio.NewWriterSize(gzw, 4096) + defer w.Flush() + + // Write to 'w' +``` + +This will only use up to 4KB in memory when the writer is idle. + +Compression is almost always worse than the fastest compression level +and each write will allocate (a little) memory. + + +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. 
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. +* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. +* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. +* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md new file mode 100644 index 00000000000..ca6685e2b72 --- /dev/null +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Vulnerability Definition + +A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability. + +Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. + +Out-of-memory crashes only applies if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. + +It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. 
+ +Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go new file mode 100644 index 00000000000..ea5a692d513 --- /dev/null +++ b/vendor/github.com/klauspost/compress/compressible.go @@ -0,0 +1,85 @@ +package compress + +import "math" + +// Estimate returns a normalized compressibility estimate of block b. +// Values close to zero are likely uncompressible. +// Values above 0.1 are likely to be compressible. +// Values above 0.5 are very compressible. +// Very small lengths will return 0. +func Estimate(b []byte) float64 { + if len(b) < 16 { + return 0 + } + + // Correctly predicted order 1 + hits := 0 + lastMatch := false + var o1 [256]byte + var hist [256]int + c1 := byte(0) + for _, c := range b { + if c == o1[c1] { + // We only count a hit if there was two correct predictions in a row. 
+ if lastMatch { + hits++ + } + lastMatch = true + } else { + lastMatch = false + } + o1[c1] = c + c1 = c + hist[c]++ + } + + // Use x^0.6 to give better spread + prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) + + // Calculate histogram distribution + variance := float64(0) + avg := float64(len(b)) / 256 + + for _, v := range hist { + Δ := float64(v) - avg + variance += Δ * Δ + } + + stddev := math.Sqrt(float64(variance)) / float64(len(b)) + exp := math.Sqrt(1 / float64(len(b))) + + // Subtract expected stddev + stddev -= exp + if stddev < 0 { + stddev = 0 + } + stddev *= 1 + exp + + // Use x^0.4 to give better spread + entropy := math.Pow(stddev, 0.4) + + // 50/50 weight between prediction and histogram distribution + return math.Pow((prediction+entropy)/2, 0.9) +} + +// ShannonEntropyBits returns the number of bits minimum required to represent +// an entropy encoding of the input bytes. +// https://en.wiktionary.org/wiki/Shannon_entropy +func ShannonEntropyBits(b []byte) int { + if len(b) == 0 { + return 0 + } + var hist [256]int + for _, c := range b { + hist[c]++ + } + shannon := float64(0) + invTotal := 1.0 / float64(len(b)) + for _, v := range hist[:] { + if v > 0 { + n := float64(v) + shannon += math.Ceil(-math.Log2(n*invTotal) * n) + } + } + return int(math.Ceil(shannon)) +} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 00000000000..ea7324da671 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). 
+ +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. 
The same buffer is used for compression and decompression output. + +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. 
If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 00000000000..f65eb3909cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. 
+func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. 
+ b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000000..e82fa3bb7b6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,167 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. 
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. 
+func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000000..abade2d6052 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000000..074018d8f94 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. 
+ return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. 
+func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. 
+ for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + s.bw.close() + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. 
+func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if 
weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. +func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // 
-1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. +func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. 
+func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. 
+func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. 
+type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000000..535cbadfdea --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. 
+// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. 
+ // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. 
+func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh new file mode 100644 index 00000000000..aff942205f1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gen.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +cd s2/cmd/_s2sx/ || exit 1 +go generate . diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore new file mode 100644 index 00000000000..b3d262958f8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/.gitignore @@ -0,0 +1 @@ +/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md new file mode 100644 index 00000000000..8b6e5c66383 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -0,0 +1,89 @@ +# Huff0 entropy compression + +This package provides Huff0 encoding and decoding as used in zstd. 
+ +[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), +a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU +(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) + +## News + +This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. + +This ensures that most functionality is well tested. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and +[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | +| `(error)` | An internal error occurred. 
| + + +As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. + +## Tables and re-use + +Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. + +The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) +that controls this behaviour. See the documentation for details. This can be altered between each block. + +Do however note that this information is *not* stored in the output block and it is up to the users of the package to +record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, +based on the boolean reported back from the CompressXX call. + +If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the +[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. + +## Decompressing + +The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). +This will initialize the decoding tables. +You can supply the complete block to `ReadTable` and it will return the data part of the block +which can be given to the decompressor. 
+ +Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) +or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. + +For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go new file mode 100644 index 00000000000..bfc7a523dee --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -0,0 +1,224 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/internal/le" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. 
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekByteFast requires that at least one byte is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + low := le.Load32(b.in, b.off-4) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = le.Load64(b.in, b.off-8) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + low := le.Load32(b.in, b.off-4) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. 
+func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + low := le.Load32(b.in, b.off-4) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + b.value = le.Load64(b.in, b.off-8) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + low := le.Load32(b.in, b.off-4) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000000..0ebc9aaac76 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,102 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. 
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 00000000000..84aa3d12f00 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,742 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. 
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. 
+ s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. 
+ s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. + s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. 
+ err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src), nil +} + +func (s *Scratch) compress1xDo(dst, src []byte) []byte { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + bw.close() + return bw.out +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + idx := len(s.Out) + s.Out = s.compress1xDo(s.Out, toDo) + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. 
+ length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. 
+ for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
+ huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count() == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].setCount(1 << 30) + } + // fake entry, strong barrier + huffNode0[0].setCount(1 << 31) + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].setNbBits(0) + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits()]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + 
s.cTable[v.symbol()].nBits = v.nbBits() + } + + // assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits 
+ + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. 
+type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000000..0f56b02d747 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. 
+ weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: 
uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. 
+type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. 
+func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. 
+// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + 
br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = 
single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. 
+ if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ v := single[br.peekByteFast()].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// matches will compare a decoding table to a coding table. +// Errors are written to the writer. +// Nothing will be written if table is ok. +func (s *Scratch) matches(ct cTable, w io.Writer) { + if s == nil || len(s.dt.single) == 0 { + return + } + dt := s.dt.single[:1<>8) == byte(sym) { + fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) + errs++ + break + } + } + if errs == 0 { + broken-- + } + continue + } + // Unused bits in input + ub := tablelog - enc.nBits + top := enc.val << ub + // decoder looks at top bits. + dec := dt[top] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 0 { + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) + continue + } + // Ensure that all combinations are covered. 
+ for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errors, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 00000000000..ba7e8e6b027 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,226 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// and Decoder.Decompress1X that use an asm implementation of thir main loops. +package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. 
+// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. 
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 00000000000..c4c7ab2d1fe --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,830 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // 
br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + 
+skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB 
CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW 
(R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL 
AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), 
$0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ 
(DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 00000000000..908c17de63f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. 
+package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. 
+ if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. 
+// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 00000000000..77ecd68e0a7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. +package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. 
+ ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. 
+ MaxDecodedSize int + + srcLen int + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. + prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. 
+func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.srcLen = len(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. 
+ if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. 
+ if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 00000000000..3954c51219b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. 
+package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 00000000000..e802579c4f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 00000000000..4465fbe9e90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. 
EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go new file mode 100644 index 00000000000..e54909e16fc --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/le.go @@ -0,0 +1,5 @@ +package le + +type Indexer interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go new file mode 100644 index 00000000000..0cfb5c0e278 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -0,0 +1,42 @@ +//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine + +package le + +import ( + "encoding/binary" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + return b[i] +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + return binary.LittleEndian.Uint16(b[i:]) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + binary.LittleEndian.PutUint16(b, v) +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + binary.LittleEndian.PutUint32(b, v) +} + +// Store64 will store v at b. 
+func Store64(b []byte, v uint64) { + binary.LittleEndian.PutUint64(b, v) +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go new file mode 100644 index 00000000000..ada45cd909e --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -0,0 +1,55 @@ +// We enable 64 bit LE platforms: + +//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine + +package le + +import ( + "unsafe" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + //return binary.LittleEndian.Uint32(b[i:]) + //return *(*uint32)(unsafe.Pointer(&b[i])) + return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + //return binary.LittleEndian.Uint64(b[i:]) + //return *(*uint64)(unsafe.Pointer(&b[i])) + return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + //binary.LittleEndian.PutUint16(b, v) + *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + //binary.LittleEndian.PutUint32(b, v) + *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store64 will store v at b. 
+func Store64(b []byte, v uint64) { + //binary.LittleEndian.PutUint64(b, v) + *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 00000000000..6050c10f4c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 00000000000..40796a49d65 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
+// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 00000000000..77395a6b8b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 00000000000..13c6040a5de --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. 
+// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. 
A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
+ // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. 
+ wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. 
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 00000000000..2754bac6f16 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. 
+ // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. 
right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. 
At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 00000000000..34d01f4aa63 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. 
+package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. 
+ // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 00000000000..81bda5e2946 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,3 @@ +module github.com/klauspost/compress + +go 1.22 diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000000..c11d7fa28e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. 
+It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. + +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features. + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. 
+ + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + +You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined +compression settings can be specified. + +#### Future Compatibility Guarantees + +This will be an evolving project. 
When using this package it is important to note that both the compression efficiency and speed may change. + +The goal will be to keep the default efficiency at the default zstd (level 3). +However the encoding should never be assumed to remain the same, +and you should not use hashes of compressed output for similarity checks. + +The Encoder can be assumed to produce the same output from the exact same code version. +However, the may be modes in the future that break this, +although they will not be enabled without an explicit option. + +This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. + +Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), +[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) +and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). + +#### Blocks + +For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. + +`EncodeAll` will encode all input in src and append it to dst. +This function can be called concurrently. +Each call will only run on a same goroutine as the caller. + +Encoded blocks can be concatenated and the result will be the combined input stream. +Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. + +Especially when encoding blocks you should take special care to reuse the encoder. +This will effectively make it run without allocations after a warmup period. +To make it run completely without allocations, supply a destination buffer with space for all content. + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a writer that caches compressors. 
+// For this operation type we supply a nil Reader. +var encoder, _ = zstd.NewWriter(nil) + +// Compress a buffer. +// If you have a destination buffer, the allocation in the call can also be eliminated. +func Compress(src []byte) []byte { + return encoder.EncodeAll(src, make([]byte, 0, len(src))) +} +``` + +You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` +option when creating the writer. + +Using the Encoder for both a stream and individual blocks concurrently is safe. + +### Performance + +I have collected some speed examples to compare speed and compression against other compressors. + +* `file` is the input file. +* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. +* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". +* `insize`/`outsize` is the input/output size. +* `millis` is the number of milliseconds used for compression. +* `mb/s` is megabytes (2^20 bytes) per second. + +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. 
+https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst + +file out level insize outsize millis mb/s +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 + +github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 +github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 +github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 +github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 + +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 + +VM Image, Linux mint with a few installed applications: +https://files.klauspost.com/compress/rawstudio-mint14.7z + +file out level insize outsize millis mb/s +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 + +rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 +rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 +rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 +rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 + +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 + +CSV data: +https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst + +file out level insize outsize millis mb/s +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 
22.98 + +nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 +nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 +nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 +nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 + +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 +``` + +## Decompressor + +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. + +This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), +kindly supplied by [fuzzit.dev](https://fuzzit.dev/). +The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, +or run it past its limits with ANY input provided. + +### Usage + +The package has been designed for two main usages, big streams of data and smaller in-memory buffers. +There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. + +For streaming use a simple setup could look like this: + +```Go +import "github.com/klauspost/compress/zstd" + +func Decompress(in io.Reader, out io.Writer) error { + d, err := zstd.NewReader(in) + if err != nil { + return err + } + defer d.Close() + + // Copy content... + _, err = io.Copy(out, d) + return err +} +``` + +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. +Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. + +Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. +However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data +as it is being requested only. 
+ +For decoding buffers, it could look something like this: + +```Go +import "github.com/klauspost/compress/zstd" + +// Create a reader that caches decompressors. +// For this operation type we supply a nil Reader. +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) + +// Decompress a buffer. We don't supply a destination buffer, +// so it will be allocated by the decoder. +func Decompress(src []byte) ([]byte, error) { + return decoder.DecodeAll(src, nil) +} +``` + +Both of these cases should provide the functionality needed. +The decoder can be used for *concurrent* decompression of multiple buffers. +By default 4 decompressors will be created. + +It will only allow a certain number of concurrent operations to run. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. + +### Dictionaries + +Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. + +Dictionaries are added individually to Decoders. +Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. +To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. +Several dictionaries can be added at once. + +The dictionary will be used automatically for the data that specifies them. +A re-used Decoder will still contain the dictionaries registered. + +When registering multiple dictionaries with the same ID, the last one will be used. + +It is possible to use dictionaries when compressing data. + +To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used +and it will likely be used even if it doesn't improve compression. + +The used dictionary must be used to decompress the content. + +For any real gains, the dictionary should be built with similar data. 
+If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. +Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. +For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). + +For now there is a fixed startup performance penalty for compressing content with dictionaries. +This will likely be improved over time. Just be aware to test performance when implementing. + +### Allocation-less operation + +The decoder has been designed to operate without allocations after a warmup. + +This means that you should *store* the decoder for best performance. +To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. +A decoder can safely be re-used even if the previous stream failed. + +To release the resources, you must call the `Close()` function on a decoder. +After this it can *no longer be reused*, but all running goroutines will be stopped. +So you *must* use this if you will no longer need the Reader. + +For decompressing smaller buffers a single decoder can be used. +When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. +In this case no unneeded allocations should be made. + +### Concurrency + +The buffer decoder does everything on the same goroutine and does nothing concurrently. +It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. + +The stream decoder will create goroutines that: + +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +The concurrency level will, for streams, determine how many blocks ahead the compression will start. 
+ +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + +### Benchmarks + +The first two are streaming decodes and the last are smaller inputs. + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. + +``` +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op 
+``` + +This reflects the performance around May 2022, but this may be out of date. + +## Zstd inside ZIP files + +It is possible to use zstandard to compress individual files inside zip archives. +While this isn't widely supported it can be useful for internal files. + +To support the compression and decompression of these files you must register a compressor and decompressor. + +It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT +use the global registration functions. The main reason for this is that 2 registrations from +different packages will result in a panic. + +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 00000000000..d41e3e1709b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,135 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "io" + "math/bits" + + "github.com/klauspost/compress/internal/le" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + value uint64 // Maybe use [16]byte, but shifting is awkward. + cursor int // offset where next read should end + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.cursor = len(in) + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) + b.bitsRead -= 32 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. 
+func (b *bitReader) fillFastStart() { + b.cursor -= 8 + b.value = le.Load64(b.in, b.cursor) + b.bitsRead = 0 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.cursor >= 4 { + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) + b.bitsRead -= 32 + return + } + + b.bitsRead -= uint8(8 * b.cursor) + for b.cursor > 0 { + b.cursor -= 1 + b.value = (b.value << 8) | uint64(b.in[b.cursor]) + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.cursor == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return 8*uint(b.cursor) + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + b.cursor = 0 + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000000..1952f175b0d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,112 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. 
+type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. 
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000000..0dd742fd2a6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,712 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "hash/crc32" + "io" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. 
+ RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. 
+ if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. + if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. 
+func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", 
litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. 
+ if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... 
+ if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + if debugDecoder { + println("Reading table for", tableIndex(i)) + } + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } 
+ case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. 
+ b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000000..fd35ea1480a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,892 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + "slices" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. 
+func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. 
+func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. 
+func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. 
+func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. 
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 16 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... 
+ // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + cnt := int(slices.Max(hist[:maxSym])) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. 
+func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 16 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) 
+ if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for 
each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else 
{ + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... 
+ for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. + outBits := ofB.outBits & 31 + extraBits := uint64(s.offset & bitMask32[outBits]) + extraBitsN := outBits + + mlB := mlTT[s.mlCode] + //ml.encode(mlB) + nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 + dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) + wr.addBits16NC(ml.state, uint8(nbBitsOut)) + ml.state = ml.stateTable[dstState] + + outBits = mlB.outBits & 31 + extraBits = extraBits<> 16 + dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) + wr.addBits16NC(ll.state, uint8(nbBitsOut)) + ll.state = ll.stateTable[dstState] + + outBits = llB.outBits & 31 + extraBits = extraBits<= b.size { + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() + b.litEnc.Reuse = huff0.ReusePolicyNone + return nil + } + + // Size is output minus block header. 
+ bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debugEncoder { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram() + ofH := b.coders.ofEnc.Histogram() + mlH := b.coders.mlEnc.Histogram() + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i := range b.sequences { + seq := &b.sequences[i] + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1]))) + b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1]))) + b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1]))) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 00000000000..01a01e486e1 
--- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000000..55a388553df --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. 
+ skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. 
+ if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000000..0e59a242d8d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. 
+func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 00000000000..6a5a2988b6f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,261 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. 
+const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. 
+ // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { + *h = Header{} + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return nil, ErrMagicMismatch + } + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return in[4:], nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return nil, errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + 
default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return in, nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return in, nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) 
+ f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 00000000000..ea2a19376c1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,949 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. 
+ decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes read and any error that occurred. +// When the stream is done, io.EOF will be returned. 
+func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. 
+ if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. + d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. 
+func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + frame.history.decoders.br.cursor = 0 + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. 
+ if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before 
CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. +func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. 
+// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... 
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += 
uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. 
+ } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000000..774c5f00fe4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. 
+// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. 
+func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. +func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. 
+func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 00000000000..b7b83164bc7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,565 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. 
+ if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. 
+ DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) + } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + if len(block.sequences) == 0 { + 
continue + } + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if int(offset) >= len(o.History) { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. + nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + + // Ensure we aren't trying to represent RLE. 
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... 
Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) 
+ if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 00000000000..7d250c67f59 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,173 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + bufferReset int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. 
+ if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. +func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. 
+func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("t (%d) < 0", t) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + e.blk.dictLitEnc = nil + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < e.bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 00000000000..4613724e9d1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,560 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 +} + +const highScore = maxMatchLen * 8 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep) & 3) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. 
+// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + 
blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. 
+ if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if m.rep <= 0 { + // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. 
+ improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... 
+ improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + s = best.s + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. 
+ seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + s = best.s + best.length + nextEmit = s + + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 + } + + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + off++ + } + if s >= sLimit { + break encodeLoop + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. 
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ 
+ offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 00000000000..84a79fde767 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1252 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. 
+// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. 
+ if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. 
+ // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... 
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. 
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + 
e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func 
(e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 00000000000..d36be7bd8c2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1123 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+ dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. 
+ offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. 
We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. 
+ coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. 
+ s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + 
e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + 
*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 00000000000..f45a3da7dae --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,891 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. 
+ // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && 
uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. 
+ seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. 
+ l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. 
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. 
+ // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. 
+ l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + 
*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 00000000000..8f8223cd3a6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,642 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. 
+func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst := fh.appendTo(tmp[:0]) + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. 
+ if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + if final { + s.eofWritten = true + } + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. 
+ enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. 
+func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. 
+// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst = fh.appendTo(dst) + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
+ oldout := blk.output + // Output directly to dst + blk.output = dst + + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = blk.output + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = append(dst, blk.output...) + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. 
+ maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 00000000000..20671dcb91d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,339 @@ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: false, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. 
+func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. 
+func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level and max 8MB. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + n = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. 
+// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. 
+func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 8 << 20 + case SpeedBestCompression: + o.windowSize = 8 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedDefault + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. 
+func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size and the window size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. 
+func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. +func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 00000000000..e47af66e7c9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,415 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + case nil: + signature[0] = b[0] + default: + return err + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + case nil: + copy(signature[1:], b) + default: + return err + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. 
+ break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + 
println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. 
+ if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. 
+func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. + crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if 
uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 00000000000..667ca06794e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) []byte { + dst = append(dst, frameMagic...) 
+ var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are not stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. 
+func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 00000000000..2f8860a722b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,307 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) 
+ * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. 
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits 
+ } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> (bitCount & 31) + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + bitStream = b.Uint32() >> (bitCount & 31) + } + } + s.symbolLen = charnum + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return s.buildDtable() +} + +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. 
+ // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +// Using a composite uint64 is faster than a struct with separate members. +type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) +} + +// decSymbolValue returns the transformed decSymbol for the given symbol. 
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { + if int(symb) >= len(t) { + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + } + lu := t[symb] + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil +} + +// setRLE will set the decoder til RLE mode. +func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. 
+func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 00000000000..d04a829b0a0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 00000000000..bcde3986953 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. 
+ +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ 
build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 00000000000..8adfebb0297 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,73 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + v = 1 + } + symbolNext[i] = uint16(v) + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + for { + // lowprob area + position = (position + step) & tableMask + if position <= highThreshold { + break + } + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState 
> tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 00000000000..ab26326a8ff --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. 
+type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. 
+ if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. 
+// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. +func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= 
(s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + 
toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. 
+func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + 
panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. 
+func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 00000000000..474cb77d2b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. 
+ fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. 
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 
00000000000..5d73c21ebdd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 00000000000..09164856d22 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. 
+type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. 
+ discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 00000000000..24b53065f40 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000000..777290d44ce --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. 
+ +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000000..fc40c820016 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. 
+type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. 
+func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000000..ddb63aa91b1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). 
+#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. 
+ SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 00000000000..ae7d4d3295a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(s *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD s+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 00000000000..d4221edf4fd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000000..0be16cefc7f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = 
b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000000..6f3b0cb1026 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 00000000000..f41932b7a4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 00000000000..0782b86e3d1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,66 @@ +// Copied from S2 implementation. 
+ +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SHRL $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 00000000000..bea1779e973 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,38 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "math/bits" + + "github.com/klauspost/compress/internal/le" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. 
+func matchLen(a, b []byte) (n int) { + left := len(a) + for left >= 8 { + diff := le.Load64(a, n) ^ le.Load64(b, n) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + left -= 8 + } + a = a[n:] + b = b[n:] + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 00000000000..9a7de82f9ef --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,503 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. + llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. 
+func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. 
+ src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. +func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. 
+ br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. 
+ if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) + s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. 
+ dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
+ br.fill() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fill() + } + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 00000000000..c59f17e07ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,394 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + "io" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to 
_generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. 
+ const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorOverread: + return true, io.ErrUnexpectedEOF + + case 
errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) + if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// error reported when bits are overread. +const errorOverread = 6 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. 
+// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = 
sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. 
+// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 00000000000..a708ca6d3d9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4151 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. 
+ +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 40(CX), BX + MOVQ (CX), AX + MOVQ 32(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE 
sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ 
BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + 
+sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 40(CX), BX + MOVQ (CX), AX + MOVQ 32(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for 
state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP 
sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error 
+error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 40(BX), DX + MOVQ (BX), CX + MOVQ 32(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + 
MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE 
sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) + + // Return success + 
MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 40(BX), DX + MOVQ (BX), CX + MOVQ 32(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ 
R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + 
ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW 
(R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // 
outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + 
+copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ 
$0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 40(CX), BX + MOVQ (CX), AX + MOVQ 32(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ 
CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE 
sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, 
$0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: 
+ MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match 
length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 40(BX), DX + MOVQ (BX), CX + MOVQ 32(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ 
R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 
+ JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, 
(R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB 
R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 40(CX), BX + MOVQ (CX), AX + MOVQ 32(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + 
MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + 
ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ 
ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, 
R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, 
-8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 
16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 40(BX), DX + MOVQ (BX), CX + MOVQ 32(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + 
BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, 
-4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: 
+ MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + 
DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 00000000000..7cec2197cd9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. 
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. 
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 00000000000..65045eabdde --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,112 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. 
+func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000000..a17381b8f89 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,434 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. + ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. 
+// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + var n int + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := 
int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, r.err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + 
// Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, r.err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 00000000000..29c15c8c4ef --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. 
+// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. 
+// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000000..6252b46ae6f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,126 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "errors" + "log" + "math" + + "github.com/klauspost/compress/internal/le" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. + ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. 
+ ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. 
+ ErrEncoderClosed = errors.New("encoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +func load3232(b []byte, i int32) uint32 { + return le.Load32(b, i) +} + +func load6432(b []byte, i int32) uint64 { + return le.Load64(b, i) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/openshift/assisted-image-service/internal/common/version.go b/vendor/github.com/openshift/assisted-image-service/internal/common/version.go new file mode 100644 index 00000000000..c9b888125e2 --- /dev/null +++ b/vendor/github.com/openshift/assisted-image-service/internal/common/version.go @@ -0,0 +1,17 @@ +package common + +import ( + "github.com/hashicorp/go-version" +) + +func VersionGreaterOrEqual(version1, version2 string) (bool, error) { + v1, err := version.NewVersion(version1) + if err != nil { + return false, err + } + v2, err := version.NewVersion(version2) + if err != nil { + return false, err + } + return v1.GreaterThanOrEqual(v2), nil +} diff --git a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/ignition.go b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/ignition.go index 401ea596027..22754f9d107 100644 --- a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/ignition.go +++ b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/ignition.go @@ -2,47 +2,41 @@ package isoeditor import ( "bytes" - "compress/gzip" - - "github.com/cavaliercoder/go-cpio" - "github.com/pkg/errors" + "fmt" + "path" + "strings" ) type IgnitionContent struct 
{ - Config []byte + Config []byte + SystemConfigs map[string][]byte } func (ic *IgnitionContent) Archive() (*bytes.Reader, error) { - // Run gzip compression - compressedBuffer := new(bytes.Buffer) - gzipWriter := gzip.NewWriter(compressedBuffer) - // Create CPIO archive - cpioWriter := cpio.NewWriter(gzipWriter) - - if err := cpioWriter.WriteHeader(&cpio.Header{ - Name: "config.ign", - Mode: 0o100_644, - Size: int64(len(ic.Config)), - }); err != nil { - return nil, errors.Wrap(err, "Failed to write CPIO header") - } - if _, err := cpioWriter.Write(ic.Config); err != nil { - return nil, errors.Wrap(err, "Failed to write CPIO archive") + var files []fileEntry + + if len(ic.Config) > 0 { + files = append(files, fileEntry{ + Content: ic.Config, + Path: "config.ign", + Mode: 0o100_644, + }) } - if err := cpioWriter.Close(); err != nil { - return nil, errors.Wrap(err, "Failed to close CPIO archive") - } - if err := gzipWriter.Close(); err != nil { - return nil, errors.Wrap(err, "Failed to gzip ignition config") - } - - padSize := (4 - (compressedBuffer.Len() % 4)) % 4 - for i := 0; i < padSize; i++ { - if err := compressedBuffer.WriteByte(0); err != nil { - return nil, err + for filename, content := range ic.SystemConfigs { + if strings.Contains(filename, "/") { + return nil, fmt.Errorf("system config filename %q contains path separators", filename) } + files = append(files, fileEntry{ + Content: content, + Path: path.Join("usr", "lib", "ignition", "base.d", filename), + Mode: 0o100_644, + }) } - return bytes.NewReader(compressedBuffer.Bytes()), nil + compressedCpio, err := generateCompressedCPIO(files) + if err != nil { + return nil, err + } + return bytes.NewReader(compressedCpio), nil } diff --git a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/isoutil.go b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/isoutil.go index 828d7985ec0..078e4be2398 100644 --- 
a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/isoutil.go +++ b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/isoutil.go @@ -1,6 +1,8 @@ package isoeditor import ( + "bytes" + "compress/gzip" "fmt" "io" "math" @@ -8,6 +10,7 @@ import ( "path/filepath" "strings" + "github.com/cavaliercoder/go-cpio" diskfs "github.com/diskfs/go-diskfs" "github.com/diskfs/go-diskfs/disk" "github.com/diskfs/go-diskfs/filesystem" @@ -15,6 +18,13 @@ import ( "github.com/pkg/errors" ) +const ( + AMD64CPUArchitecture = "amd64" + X86CPUArchitecture = "x86_64" + ARM64CPUArchitecture = "arm64" + AARCH64CPUArchitecture = "aarch64" +) + // Extract unpacks the iso contents into the working directory func Extract(isoPath string, workDir string) error { d, err := diskfs.Open(isoPath, diskfs.WithOpenMode(diskfs.ReadOnly)) @@ -195,12 +205,14 @@ func Create(outPath string, workDir string, volumeLabel string) error { // Returns the number of sectors to load for efi boot // Load Sectors * 2048 should be the size of efiboot.img rounded up to a multiple of 2048 +// For UEFI boot, the sector size is 512 +// To support iso9660 (2048) and UEFI (512), sectors must be in blocks of 512, but must also be a multiple of 2048 func efiLoadSectors(workDir string) (uint16, error) { efiStat, err := os.Stat(filepath.Join(workDir, "images/efiboot.img")) if err != nil { return 0, err } - return uint16(math.Ceil(float64(efiStat.Size()) / 2048)), nil + return uint16(math.Ceil(float64(efiStat.Size())/2048) * 4), nil } func cdbootLoadSectors(workDir string) (result uint16, err error) { @@ -235,6 +247,7 @@ func cdbootLoadSectors(workDir string) (result uint16, err error) { if sectors > math.MaxUint16 { sectors = math.MaxUint16 } + // nolint: gosec result = uint16(sectors) return } @@ -342,3 +355,48 @@ func ReadFileFromISO(isoPath, filePath string) ([]byte, error) { func GetISO9660FileSystem(d *disk.Disk) (filesystem.FileSystem, error) { return iso9660.Read(d.File, d.Size, 0, 0) } + +// 
fileEntry represents a single file to be added to a CPIO archive +type fileEntry struct { + Content []byte + Path string + Mode cpio.FileMode +} + +func generateCompressedCPIO(files []fileEntry) ([]byte, error) { + // Run gzip compression + compressedBuffer := new(bytes.Buffer) + gzipWriter := gzip.NewWriter(compressedBuffer) + // Create CPIO archive + cpioWriter := cpio.NewWriter(gzipWriter) + + // Add each file to the archive + for _, file := range files { + if err := cpioWriter.WriteHeader(&cpio.Header{ + Name: file.Path, + Mode: file.Mode, + Size: int64(len(file.Content)), + }); err != nil { + return nil, errors.Wrap(err, "Failed to write CPIO header") + } + if _, err := cpioWriter.Write(file.Content); err != nil { + return nil, errors.Wrap(err, "Failed to write CPIO archive") + } + } + + if err := cpioWriter.Close(); err != nil { + return nil, errors.Wrap(err, "Failed to close CPIO archive") + } + if err := gzipWriter.Close(); err != nil { + return nil, errors.Wrap(err, "Failed to gzip ignition config") + } + + padSize := (4 - (compressedBuffer.Len() % 4)) % 4 + for i := 0; i < padSize; i++ { + if err := compressedBuffer.WriteByte(0); err != nil { + return nil, err + } + } + + return compressedBuffer.Bytes(), nil +} diff --git a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_editor.go b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_editor.go index 86c5ed1f11e..935b21f0ec2 100644 --- a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_editor.go +++ b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_editor.go @@ -34,15 +34,15 @@ func (m *MockEditor) EXPECT() *MockEditorMockRecorder { } // CreateMinimalISOTemplate mocks base method. 
-func (m *MockEditor) CreateMinimalISOTemplate(arg0, arg1, arg2, arg3 string) error { +func (m *MockEditor) CreateMinimalISOTemplate(arg0, arg1, arg2, arg3, arg4, arg5 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateMinimalISOTemplate", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "CreateMinimalISOTemplate", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(error) return ret0 } // CreateMinimalISOTemplate indicates an expected call of CreateMinimalISOTemplate. -func (mr *MockEditorMockRecorder) CreateMinimalISOTemplate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockEditorMockRecorder) CreateMinimalISOTemplate(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMinimalISOTemplate", reflect.TypeOf((*MockEditor)(nil).CreateMinimalISOTemplate), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMinimalISOTemplate", reflect.TypeOf((*MockEditor)(nil).CreateMinimalISOTemplate), arg0, arg1, arg2, arg3, arg4, arg5) } diff --git a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_executer.go b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_executer.go new file mode 100644 index 00000000000..416b51f6d94 --- /dev/null +++ b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_executer.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/openshift/assisted-image-service/pkg/isoeditor (interfaces: Executer) + +// Package isoeditor is a generated GoMock package. +package isoeditor + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockExecuter is a mock of Executer interface. +type MockExecuter struct { + ctrl *gomock.Controller + recorder *MockExecuterMockRecorder +} + +// MockExecuterMockRecorder is the mock recorder for MockExecuter. 
+type MockExecuterMockRecorder struct { + mock *MockExecuter +} + +// NewMockExecuter creates a new mock instance. +func NewMockExecuter(ctrl *gomock.Controller) *MockExecuter { + mock := &MockExecuter{ctrl: ctrl} + mock.recorder = &MockExecuterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExecuter) EXPECT() *MockExecuterMockRecorder { + return m.recorder +} + +// Execute mocks base method. +func (m *MockExecuter) Execute(arg0, arg1 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Execute", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Execute indicates an expected call of Execute. +func (mr *MockExecuterMockRecorder) Execute(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockExecuter)(nil).Execute), arg0, arg1) +} diff --git a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_nmstate_handler.go b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_nmstate_handler.go new file mode 100644 index 00000000000..91ed852bc14 --- /dev/null +++ b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/mock_nmstate_handler.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/openshift/assisted-image-service/pkg/isoeditor (interfaces: NmstateHandler) + +// Package isoeditor is a generated GoMock package. +package isoeditor + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockNmstateHandler is a mock of NmstateHandler interface. +type MockNmstateHandler struct { + ctrl *gomock.Controller + recorder *MockNmstateHandlerMockRecorder +} + +// MockNmstateHandlerMockRecorder is the mock recorder for MockNmstateHandler. 
+type MockNmstateHandlerMockRecorder struct { + mock *MockNmstateHandler +} + +// NewMockNmstateHandler creates a new mock instance. +func NewMockNmstateHandler(ctrl *gomock.Controller) *MockNmstateHandler { + mock := &MockNmstateHandler{ctrl: ctrl} + mock.recorder = &MockNmstateHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNmstateHandler) EXPECT() *MockNmstateHandlerMockRecorder { + return m.recorder +} + +// BuildNmstateCpioArchive mocks base method. +func (m *MockNmstateHandler) BuildNmstateCpioArchive(arg0 string) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BuildNmstateCpioArchive", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BuildNmstateCpioArchive indicates an expected call of BuildNmstateCpioArchive. +func (mr *MockNmstateHandlerMockRecorder) BuildNmstateCpioArchive(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildNmstateCpioArchive", reflect.TypeOf((*MockNmstateHandler)(nil).BuildNmstateCpioArchive), arg0) +} diff --git a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/nmstate_handler.go b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/nmstate_handler.go new file mode 100644 index 00000000000..94d748ad17d --- /dev/null +++ b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/nmstate_handler.go @@ -0,0 +1,135 @@ +package isoeditor + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +//go:generate mockgen -package=isoeditor -destination=mock_nmstate_handler.go . 
NmstateHandler +type NmstateHandler interface { + BuildNmstateCpioArchive(rootfsPath string) ([]byte, error) +} + +type nmstateHandler struct { + workDir string + executer Executer +} + +func NewNmstateHandler(workDir string, executer Executer) NmstateHandler { + return &nmstateHandler{ + workDir: workDir, + executer: executer, + } +} + +func (n *nmstateHandler) BuildNmstateCpioArchive(rootfsPath string) ([]byte, error) { + // Extract nmstatectl binary + var err error + nmstateDir := filepath.Join(n.workDir, "nmstate") + err = os.MkdirAll(nmstateDir, os.ModePerm) + if err != nil { + return nil, err + } + // Remove temp dir + defer func() { + removeErr := os.RemoveAll(nmstateDir) + if removeErr != nil { + log.WithError(removeErr).Error("failed to remove nmstate temp dir") + } + }() + + binaryPath, err := n.extractNmstatectl(rootfsPath, nmstateDir) + if err != nil { + return nil, err + } + nmstatectlPath := filepath.Join(nmstateDir, "squashfs-root", binaryPath) + + // Check if nmstatectl binary file exists + if _, err = os.Stat(nmstatectlPath); os.IsNotExist(err) { + return nil, err + } + + // Read binary + nmstateBinContent, err := os.ReadFile(nmstatectlPath) + if err != nil { + return nil, err + } + + // Create a compressed RAM disk image with the nmstatectl binary + compressedCpio, err := generateCompressedCPIO([]fileEntry{ + { + Content: nmstateBinContent, + Path: NmstatectlPathInRamdisk, + Mode: 0o100_755, + }, + }) + if err != nil { + return nil, err + } + + return compressedCpio, err +} + +// TODO: Update the code to utilize go-diskfs's squashfs instead of unsquashfs once go-diskfs supports the zstd compression format used by CoreOS - MGMT-19227 +func (n *nmstateHandler) extractNmstatectl(rootfsPath, nmstateDir string) (string, error) { + _, err := n.executer.Execute(fmt.Sprintf("cat %s | cpio -i", rootfsPath), nmstateDir) + if err != nil { + log.Errorf("failed to extract rootfs.img using cpio command: %v", err.Error()) + return "", err + } + // limiting files 
is needed on el<=9 due to https://github.com/plougher/squashfs-tools/issues/125 + ulimit := "ulimit -n 1024" + + // Listing the filesystem concisely, displaying only files (using `-lc` option). + // Each file in the output won't include any prefix before `/ostree` (by using `-dest ''` option), + // which is useful when invoking `-extract-file` (after finding the `nmstatectl` binary path). + list, err := n.executer.Execute(fmt.Sprintf("%s ; unsquashfs -dest '' -lc %s", ulimit, "root.squashfs"), nmstateDir) + if err != nil { + log.Errorf("failed to unsquashfs root.squashfs: %v", err.Error()) + return "", err + } + + r, err := regexp.Compile(".*nmstatectl") + if err != nil { + log.Errorf("failed to compile regexp: %v", err.Error()) + return "", err + } + binaryPath := r.FindString(list) + + _, err = n.executer.Execute(fmt.Sprintf("%s ; unsquashfs -no-xattrs %s -extract-file %s", ulimit, "root.squashfs", binaryPath), nmstateDir) + if err != nil { + log.Errorf("failed to unsquashfs root.squashfs: %v", err.Error()) + return "", err + } + return binaryPath, nil +} + +//go:generate mockgen -package=isoeditor -destination=mock_executer.go . 
Executer +type Executer interface { + Execute(command, workDir string) (string, error) +} + +type CommonExecuter struct{} + +func (e *CommonExecuter) Execute(command, workDir string) (string, error) { + var stdoutBytes, stderrBytes bytes.Buffer + cmd := exec.Command("bash", "-c", command) + cmd.Stdout = &stdoutBytes + cmd.Stderr = &stderrBytes + log.Infof(fmt.Sprintf("Running cmd: %s\n", command)) + cmd.Dir = workDir + err := cmd.Run() + if err != nil { + return "", errors.Wrapf(err, "Failed to execute cmd (%s): %s", cmd, stderrBytes.String()) + } + + return strings.TrimSuffix(stdoutBytes.String(), "\n"), nil +} diff --git a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/rhcos.go b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/rhcos.go index fbd0c8d65bb..adf330c3196 100644 --- a/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/rhcos.go +++ b/vendor/github.com/openshift/assisted-image-service/pkg/isoeditor/rhcos.go @@ -6,30 +6,39 @@ import ( "path/filepath" "regexp" + "github.com/openshift/assisted-image-service/internal/common" log "github.com/sirupsen/logrus" ) const ( - RamDiskPaddingLength = uint64(1024 * 1024) // 1MB - ramDiskImagePath = "/images/assisted_installer_custom.img" + RamDiskPaddingLength = uint64(1024 * 1024) // 1MB + NmstatectlPathInRamdisk = "/usr/bin/nmstatectl" + ramDiskImagePath = "/images/assisted_installer_custom.img" + nmstateDiskImagePath = "/images/nmstate.img" + MinimalVersionForNmstatectl = "4.18.0-ec.0" + RootfsImagePath = "images/pxeboot/rootfs.img" ) //go:generate mockgen -package=isoeditor -destination=mock_editor.go . 
Editor type Editor interface { - CreateMinimalISOTemplate(fullISOPath, rootFSURL, arch, minimalISOPath string) error + CreateMinimalISOTemplate(fullISOPath, rootFSURL, arch, minimalISOPath, openshiftVersion, nmstatectlPath string) error } type rhcosEditor struct { - workDir string + workDir string + nmstateHandler NmstateHandler } -func NewEditor(dataDir string) Editor { - return &rhcosEditor{workDir: dataDir} +func NewEditor(dataDir string, nmstateHandler NmstateHandler) Editor { + return &rhcosEditor{ + workDir: dataDir, + nmstateHandler: nmstateHandler, + } } // CreateMinimalISO Creates the minimal iso by removing the rootfs and adding the url func CreateMinimalISO(extractDir, volumeID, rootFSURL, arch, minimalISOPath string) error { - if err := os.Remove(filepath.Join(extractDir, "images/pxeboot/rootfs.img")); err != nil { + if err := os.Remove(filepath.Join(extractDir, RootfsImagePath)); err != nil { return err } @@ -38,14 +47,19 @@ func CreateMinimalISO(extractDir, volumeID, rootFSURL, arch, minimalISOPath stri return err } - if err := fixGrubConfig(rootFSURL, extractDir); err != nil { + var includeNmstateRamDisk bool + if _, err := os.Stat(filepath.Join(extractDir, nmstateDiskImagePath)); err == nil { + includeNmstateRamDisk = true + } + + if err := fixGrubConfig(rootFSURL, extractDir, includeNmstateRamDisk); err != nil { log.WithError(err).Warnf("Failed to edit grub config") return err } // ignore isolinux.cfg for ppc64le because it doesn't exist if arch != "ppc64le" { - if err := fixIsolinuxConfig(rootFSURL, extractDir); err != nil { + if err := fixIsolinuxConfig(rootFSURL, extractDir, includeNmstateRamDisk); err != nil { log.WithError(err).Warnf("Failed to edit isolinux config") return err } @@ -58,7 +72,7 @@ func CreateMinimalISO(extractDir, volumeID, rootFSURL, arch, minimalISOPath stri } // CreateMinimalISOTemplate Creates the template minimal iso by removing the rootfs and adding the url -func (e *rhcosEditor) CreateMinimalISOTemplate(fullISOPath, 
rootFSURL, arch, minimalISOPath string) error { +func (e *rhcosEditor) CreateMinimalISOTemplate(fullISOPath, rootFSURL, arch, minimalISOPath, openshiftVersion, nmstatectlPath string) error { extractDir, err := os.MkdirTemp(e.workDir, "isoutil") if err != nil { return err @@ -73,6 +87,37 @@ func (e *rhcosEditor) CreateMinimalISOTemplate(fullISOPath, rootFSURL, arch, min return err } + ramDiskPath := filepath.Join(extractDir, nmstateDiskImagePath) + + versionOK, err := common.VersionGreaterOrEqual(openshiftVersion, MinimalVersionForNmstatectl) + if err != nil { + return err + } + + if versionOK { + var compressedCpio []byte + var readErr error + + if _, err = os.Stat(nmstatectlPath); err == nil { + // Read and return the cached content + compressedCpio, readErr = os.ReadFile(nmstatectlPath) + if readErr != nil { + return fmt.Errorf("failed to read cached nmstatectl: %v", readErr) + } + } else if os.IsNotExist(err) { + // File doesn't exist - this should be an error condition + return fmt.Errorf("nmstatectl cache file not found: %s", nmstatectlPath) + } else { + // Some other error occurred + return fmt.Errorf("failed to stat nmstatectl cache file: %v", err) + } + + err = os.WriteFile(ramDiskPath, compressedCpio, 0755) //nolint:gosec + if err != nil { + return err + } + } + err = CreateMinimalISO(extractDir, volumeID, rootFSURL, arch, minimalISOPath) if err != nil { return err @@ -103,7 +148,7 @@ func embedInitrdPlaceholders(extractDir string) error { return nil } -func fixGrubConfig(rootFSURL, extractDir string) error { +func fixGrubConfig(rootFSURL, extractDir string, includeNmstateRamDisk bool) error { availableGrubPaths := []string{"EFI/redhat/grub.cfg", "EFI/fedora/grub.cfg", "boot/grub/grub.cfg", "EFI/centos/grub.cfg"} var foundGrubPath string for _, pathSection := range availableGrubPaths { @@ -129,14 +174,20 @@ func fixGrubConfig(rootFSURL, extractDir string) error { } // Edit config to add custom ramdisk image to initrd - if err := editFile(foundGrubPath, 
`(?m)^(\s+initrd) (.+| )+$`, fmt.Sprintf("$1 $2 %s", ramDiskImagePath)); err != nil { - return err + if includeNmstateRamDisk { + if err := editFile(foundGrubPath, `(?m)^(\s+initrd) (.+| )+$`, fmt.Sprintf("$1 $2 %s %s", ramDiskImagePath, nmstateDiskImagePath)); err != nil { + return err + } + } else { + if err := editFile(foundGrubPath, `(?m)^(\s+initrd) (.+| )+$`, fmt.Sprintf("$1 $2 %s", ramDiskImagePath)); err != nil { + return err + } } return nil } -func fixIsolinuxConfig(rootFSURL, extractDir string) error { +func fixIsolinuxConfig(rootFSURL, extractDir string, includeNmstateRamDisk bool) error { replacement := fmt.Sprintf("$1 $2 coreos.live.rootfs_url=%s", rootFSURL) if err := editFile(filepath.Join(extractDir, "isolinux/isolinux.cfg"), `(?m)^(\s+append) (.+| )+$`, replacement); err != nil { return err @@ -146,8 +197,14 @@ func fixIsolinuxConfig(rootFSURL, extractDir string) error { return err } - if err := editFile(filepath.Join(extractDir, "isolinux/isolinux.cfg"), `(?m)^(\s+append.*initrd=\S+) (.*)$`, fmt.Sprintf("${1},%s ${2}", ramDiskImagePath)); err != nil { - return err + if includeNmstateRamDisk { + if err := editFile(filepath.Join(extractDir, "isolinux/isolinux.cfg"), `(?m)^(\s+append.*initrd=\S+) (.*)$`, fmt.Sprintf("${1},%s,%s ${2}", ramDiskImagePath, nmstateDiskImagePath)); err != nil { + return err + } + } else { + if err := editFile(filepath.Join(extractDir, "isolinux/isolinux.cfg"), `(?m)^(\s+append.*initrd=\S+) (.*)$`, fmt.Sprintf("${1},%s ${2}", ramDiskImagePath)); err != nil { + return err + } } return nil diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go index 499789984d2..69956b425a1 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -1,4 +1,4 @@ -// +build darwin dragonfly freebsd netbsd openbsd +// +build darwin dragonfly freebsd netbsd openbsd hurd // +build !js package logrus 
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index 04748b8515f..c9aed267a4c 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -1,5 +1,7 @@ +//go:build (linux || aix || zos) && !js && !wasi // +build linux aix zos // +build !js +// +build !wasi package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go new file mode 100644 index 00000000000..2822b212fbf --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go @@ -0,0 +1,8 @@ +//go:build wasi +// +build wasi + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go new file mode 100644 index 00000000000..108a6be12b1 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go @@ -0,0 +1,8 @@ +//go:build wasip1 +// +build wasip1 + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/gopkg.in/djherbis/times.v1/.travis.sh b/vendor/gopkg.in/djherbis/times.v1/.travis.sh deleted file mode 100644 index c59e062f715..00000000000 --- a/vendor/gopkg.in/djherbis/times.v1/.travis.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -set -e - -script() { - if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; - then - COVERALLS_PARALLEL=true - - if [ ! -z "$JS" ]; - then - bash js.cover.sh - else - go test -covermode=count -coverprofile=profile.cov - fi - - go get github.com/axw/gocov/gocov github.com/mattn/goveralls golang.org/x/tools/cmd/cover - $HOME/gopath/bin/goveralls --coverprofile=profile.cov -service=travis-ci - fi - - if [ -z "$JS" ]; - then - go get golang.org/x/lint/golint && golint ./... - go vet - go test -bench=.* -v ./... 
- fi -} - -"$@" \ No newline at end of file diff --git a/vendor/gopkg.in/djherbis/times.v1/.travis.yml b/vendor/gopkg.in/djherbis/times.v1/.travis.yml deleted file mode 100644 index b3cda1363d8..00000000000 --- a/vendor/gopkg.in/djherbis/times.v1/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go -matrix: - include: - - os: linux - go: tip - - os: linux - go: tip - env: - - JS=1 - - os: osx - go: tip - - os: windows - go: 1.x -#Added power jobs - - os: linux - go: tip - arch: ppc64le - - os: linux - go: tip - arch: ppc64le - env: - - JS=1 -script: bash .travis.sh script -notifications: - webhooks: https://coveralls.io/webhook - email: - on_success: never - on_failure: change diff --git a/vendor/modules.txt b/vendor/modules.txt index 8d52085f0d0..ba32b9a7c7e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -706,11 +706,14 @@ github.com/digitalocean/go-libvirt/internal/event github.com/digitalocean/go-libvirt/internal/go-xdr/xdr2 github.com/digitalocean/go-libvirt/socket github.com/digitalocean/go-libvirt/socket/dialers -# github.com/diskfs/go-diskfs v1.4.0 -## explicit; go 1.19 +# github.com/diskfs/go-diskfs v1.4.1 +## explicit; go 1.21 github.com/diskfs/go-diskfs github.com/diskfs/go-diskfs/disk github.com/diskfs/go-diskfs/filesystem +github.com/diskfs/go-diskfs/filesystem/ext4 +github.com/diskfs/go-diskfs/filesystem/ext4/crc +github.com/diskfs/go-diskfs/filesystem/ext4/md4 github.com/diskfs/go-diskfs/filesystem/fat32 github.com/diskfs/go-diskfs/filesystem/iso9660 github.com/diskfs/go-diskfs/filesystem/squashfs @@ -719,6 +722,9 @@ github.com/diskfs/go-diskfs/partition/gpt github.com/diskfs/go-diskfs/partition/mbr github.com/diskfs/go-diskfs/partition/part github.com/diskfs/go-diskfs/util +# github.com/djherbis/times v1.6.0 +## explicit; go 1.16 +github.com/djherbis/times # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units @@ -1084,6 +1090,16 @@ github.com/kballard/go-shellquote # github.com/kdomanski/iso9660 v0.2.1 ## 
explicit; go 1.14 github.com/kdomanski/iso9660 +# github.com/klauspost/compress v1.18.0 +## explicit; go 1.22 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/le +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/kr/fs v0.1.0 ## explicit github.com/kr/fs @@ -1316,8 +1332,9 @@ github.com/openshift/api/machineconfiguration/v1alpha1 github.com/openshift/api/operator/v1 github.com/openshift/api/operator/v1alpha1 github.com/openshift/api/route/v1 -# github.com/openshift/assisted-image-service v0.0.0-20240607085136-02df2e56dde6 +# github.com/openshift/assisted-image-service v0.0.0-20250917153356-4ca9ff81f712 ## explicit; go 1.21 +github.com/openshift/assisted-image-service/internal/common github.com/openshift/assisted-image-service/pkg/isoeditor github.com/openshift/assisted-image-service/pkg/overlay # github.com/openshift/assisted-service/api v0.0.0 => github.com/openshift/assisted-service/api v0.0.0-20250922204150-a52b83145bea @@ -1557,7 +1574,7 @@ github.com/shurcooL/httpfs/vfsutil # github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd ## explicit github.com/shurcooL/vfsgen -# github.com/sirupsen/logrus v1.9.3 +# github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af ## explicit; go 1.13 github.com/sirupsen/logrus github.com/sirupsen/logrus/hooks/test @@ -2130,9 +2147,6 @@ google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb -# gopkg.in/djherbis/times.v1 v1.3.0 -## explicit -gopkg.in/djherbis/times.v1 # gopkg.in/evanphx/json-patch.v4 v4.12.0 ## explicit gopkg.in/evanphx/json-patch.v4 From 08a9d09e570bfebe149faf40f28afbc121cba197 Mon Sep 17 00:00:00 2001 From: Zane Bitter 
Date: Mon, 22 Sep 2025 11:53:54 +1200 Subject: [PATCH 14/14] Move bootstrap disk config to system ignition Avoid modifying the bootstrap user ignition, and instead add the disk configuration for the bootstrap VM to the system ignition. --- pkg/infrastructure/baremetal/bootstrap.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pkg/infrastructure/baremetal/bootstrap.go b/pkg/infrastructure/baremetal/bootstrap.go index aa084f3ccd6..df8062aa9b0 100644 --- a/pkg/infrastructure/baremetal/bootstrap.go +++ b/pkg/infrastructure/baremetal/bootstrap.go @@ -2,7 +2,6 @@ package baremetal import ( "context" - "encoding/json" "encoding/xml" "errors" "fmt" @@ -170,10 +169,10 @@ func getLiveISO(config baremetalConfig, arch string) (string, error) { } func bootstrapIgnition(config baremetalConfig) (*isoeditor.IgnitionContent, error) { - ign := &igntypes.Config{} - // TODO(zaneb): Put swap config into system ignition rather than modifying user ignition - if err := json.Unmarshal([]byte(config.IgnitionBootstrap), &ign); err != nil { - return nil, fmt.Errorf("failed to unmarshal bootstrap Ignition config: %w", err) + ign := &igntypes.Config{ + Ignition: igntypes.Ignition{ + Version: igntypes.MaxVersion.String(), + }, } fsLabel := "var" @@ -220,7 +219,12 @@ RequiredBy=localfs.target if err != nil { return nil, fmt.Errorf("failed to marshal bootstrap Ignition config: %w", err) } - return &isoeditor.IgnitionContent{Config: ignData}, nil + return &isoeditor.IgnitionContent{ + Config: []byte(config.IgnitionBootstrap), + SystemConfigs: map[string][]byte{ + "30-scratch-disk.ign": ignData, + }, + }, nil } func createLiveVolume(virConn *libvirt.Libvirt, config baremetalConfig, pool libvirt.StoragePool) (libvirt.StorageVol, error) {