prompt (stringlengths 183–7.33M) | completion (stringlengths 13–833k) | __index_level_0__ (int64 0–215k)
---|---|---|
Review the shared code context and generate a suite of unit tests for the functions in the shared code context, using the detected test framework and libraries.
Code context:
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/textfile"
"github.com/restic/restic/internal/ui"
"github.com/spf13/pflag"
)
type rejectionCache struct {
m map[string]bool
mtx sync.Mutex
}
// Lock locks the mutex in rc.
func (rc *rejectionCache) Lock() {
if rc != nil {
rc.mtx.Lock()
}
}
// Unlock unlocks the mutex in rc.
func (rc *rejectionCache) Unlock() {
if rc != nil {
rc.mtx.Unlock()
}
}
// Get returns the last stored value for dir and a second boolean that
// indicates whether that value was actually written to the cache. It is the
// caller's responsibility to call rc.Lock and rc.Unlock before using this
// method, otherwise data races may occur.
func (rc *rejectionCache) Get(dir string) (bool, bool) {
if rc == nil || rc.m == nil {
return false, false
}
v, ok := rc.m[dir]
return v, ok
}
// Store stores a new value for dir. It is the caller's responsibility to call
// rc.Lock and rc.Unlock before using this method, otherwise data races may
// occur.
func (rc *rejectionCache) Store(dir string, rejected bool) {
if rc == nil {
return
}
if rc.m == nil {
rc.m = make(map[string]bool)
}
rc.m[dir] = rejected
}
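// Illustrative sketch (not part of the original source, helper name is
// hypothetical): the intended calling sequence for rejectionCache. Lock and
// Unlock guard Get and Store; a nil *rejectionCache is valid and simply
// caches nothing.
func exampleRejectionCacheUsage(rc *rejectionCache, dir string) bool {
	rc.Lock()
	defer rc.Unlock()
	if rejected, ok := rc.Get(dir); ok {
		// a previous visit already decided this directory
		return rejected
	}
	rejected := false // stand-in for a real evaluation of dir
	rc.Store(dir, rejected)
	return rejected
}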
// RejectByNameFunc is a function that takes a filename of a
// file that would be included in the backup. The function returns true if it
// should be excluded (rejected) from the backup.
type RejectByNameFunc func(path string) bool
// RejectFunc is a function that takes a filename and os.FileInfo of a
// file that would be included in the backup. The function returns true if it
// should be excluded (rejected) from the backup.
type RejectFunc func(path string, fi os.FileInfo) bool
// rejectByPattern returns a RejectByNameFunc which rejects files that match
// one of the patterns.
func rejectByPattern(patterns []string) RejectByNameFunc {
parsedPatterns := filter.ParsePatterns(patterns)
return func(item string) bool {
matched, err := filter.List(parsedPatterns, item)
if err != nil {
Warnf("error for exclude pattern: %v", err)
}
if matched {
debug.Log("path %q excluded by an exclude pattern", item)
return true
}
return false
}
}
// Same as `rejectByPattern` but case insensitive.
func rejectByInsensitivePattern(patterns []string) RejectByNameFunc {
for index, path := range patterns {
patterns[index] = strings.ToLower(path)
}
rejFunc := rejectByPattern(patterns)
return func(item string) bool {
return rejFunc(strings.ToLower(item))
}
}
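// Illustrative sketch (not part of the original source, helper name is
// hypothetical): case-insensitive matching works by lower-casing the
// patterns once up front and every candidate path on each call. Note that
// the patterns slice passed to rejectByInsensitivePattern is modified in
// place.
func exampleInsensitiveReject() bool {
	reject := rejectByInsensitivePattern([]string{"*.GO"})
	return reject("/home/user/FOO.go") // true: both sides compare in lower case
}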
// rejectIfPresent returns a RejectByNameFunc which itself returns whether a path
// should be excluded. The RejectByNameFunc considers a file to be excluded when
// it resides in a directory with an exclusion file, that is specified by
// excludeFileSpec in the form "filename[:content]". The returned error is
// non-nil if the filename component of excludeFileSpec is empty. A rejection
// cache is used internally to expedite the evaluation of directories that
// have already been visited.
func rejectIfPresent(excludeFileSpec string) (RejectByNameFunc, error) {
if excludeFileSpec == "" {
return nil, errors.New("name for exclusion tagfile is empty")
}
colon := strings.Index(excludeFileSpec, ":")
if colon == 0 {
return nil, fmt.Errorf("no name for exclusion tagfile provided")
}
tf, tc := "", ""
if colon > 0 {
tf = excludeFileSpec[:colon]
tc = excludeFileSpec[colon+1:]
} else {
tf = excludeFileSpec
}
debug.Log("using %q as exclusion tagfile", tf)
rc := &rejectionCache{}
fn := func(filename string) bool {
return isExcludedByFile(filename, tf, tc, rc)
}
return fn, nil
}
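// Illustrative sketch (not part of the original source, helper name is
// hypothetical): the two accepted forms of excludeFileSpec. A bare filename
// excludes on the mere presence of the tagfile; "filename:content"
// additionally requires the tagfile to start with the given header.
func exampleRejectIfPresent() {
	// exclude directories that merely contain a ".nobackup" file
	byPresence, _ := rejectIfPresent(".nobackup")
	// exclude directories whose CACHEDIR.TAG starts with the standard signature
	bySignature, _ := rejectIfPresent("CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55")
	_, _ = byPresence, bySignature
}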
// isExcludedByFile interprets filename as a path and returns true if that file
// is in an excluded directory. A directory is identified as excluded if it contains a
// tagfile which bears the name specified in tagFilename and starts with
// header. If rc is non-nil, it is used to expedite the evaluation of a
// directory based on previous visits.
func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache) bool {
if tagFilename == "" {
return false
}
dir, base := filepath.Split(filename)
if base == tagFilename {
return false // do not exclude the tagfile itself
}
rc.Lock()
defer rc.Unlock()
rejected, visited := rc.Get(dir)
if visited {
return rejected
}
rejected = isDirExcludedByFile(dir, tagFilename, header)
rc.Store(dir, rejected)
return rejected
}
func isDirExcludedByFile(dir, tagFilename, header string) bool {
tf := filepath.Join(dir, tagFilename)
_, err := fs.Lstat(tf)
if os.IsNotExist(err) {
return false
}
if err != nil {
Warnf("could not access exclusion tagfile: %v", err)
return false
}
// when no signature is given, the mere presence of tf is enough reason
// to exclude filename
if len(header) == 0 {
return true
}
// From this stage, errors mean tagFilename exists but it is malformed.
// Warnings will be generated so that the user is informed that the
// intended ignore-action is not performed.
f, err := os.Open(tf)
if err != nil {
Warnf("could not open exclusion tagfile: %v", err)
return false
}
defer func() {
_ = f.Close()
}()
buf := make([]byte, len(header))
_, err = io.ReadFull(f, buf)
// EOF is handled with a dedicated message, otherwise the warning would be too cryptic
if err == io.EOF {
Warnf("invalid (too short) signature in exclusion tagfile %q\n", tf)
return false
}
if err != nil {
Warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err)
return false
}
if !bytes.Equal(buf, []byte(header)) {
Warnf("invalid signature in exclusion tagfile %q\n", tf)
return false
}
return true
}
// DeviceMap is used to track allowed source devices for backup. This is used to
// check for crossing mount points during backup (for --one-file-system). It
// maps the name of a source path to its device ID.
type DeviceMap map[string]uint64
// NewDeviceMap creates a new device map from the list of source paths.
func NewDeviceMap(allowedSourcePaths []string) (DeviceMap, error) {
deviceMap := make(map[string]uint64)
for _, item := range allowedSourcePaths {
item, err := filepath.Abs(filepath.Clean(item))
if err != nil {
return nil, err
}
fi, err := fs.Lstat(item)
if err != nil {
return nil, err
}
id, err := fs.DeviceID(fi)
if err != nil {
return nil, err
}
deviceMap[item] = id
}
if len(deviceMap) == 0 {
return nil, errors.New("zero allowed devices")
}
return deviceMap, nil
}
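// Illustrative sketch (not part of the original source, helper name is
// hypothetical): building a DeviceMap from the backup targets and checking a
// single item against it, mirroring how rejectByDevice below wires the two
// calls together.
func exampleDeviceMapUsage(targets []string, item string, fi os.FileInfo) (bool, error) {
	deviceMap, err := NewDeviceMap(targets)
	if err != nil {
		return false, err
	}
	id, err := fs.DeviceID(fi)
	if err != nil {
		return false, err
	}
	return deviceMap.IsAllowed(filepath.Clean(item), id)
}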
// IsAllowed returns true if the path is located on an allowed device.
func (m DeviceMap) IsAllowed(item string, deviceID uint64) (bool, error) {
for dir := item; ; dir = filepath.Dir(dir) {
debug.Log("item %v, test dir %v", item, dir)
// find a parent directory that is on an allowed device (otherwise
// we would not traverse the directory at all)
allowedID, ok := m[dir]
if !ok {
if dir == filepath.Dir(dir) {
// arrived at root, no allowed device found. this should not happen.
break
}
continue
}
// if the item has a different device ID than the parent directory,
// we crossed a file system boundary
if allowedID != deviceID {
debug.Log("item %v (dir %v) on disallowed device %d", item, dir, deviceID)
return false, nil
}
// item is on allowed device, accept it
debug.Log("item %v allowed", item)
return true, nil
}
return false, fmt.Errorf("item %v (device ID %v) not found, deviceMap: %v", item, deviceID, m)
}
// rejectByDevice returns a RejectFunc that rejects files which are on a
// different file system than the files/dirs in samples.
func rejectByDevice(samples []string) (RejectFunc, error) {
deviceMap, err := NewDeviceMap(samples)
if err != nil {
return nil, err
}
debug.Log("allowed devices: %v\n", deviceMap)
return func(item string, fi os.FileInfo) bool {
id, err := fs.DeviceID(fi)
if err != nil {
// This should never happen because gatherDevices() would have
// errored out earlier. If it still does that's a reason to panic.
panic(err)
}
allowed, err := deviceMap.IsAllowed(filepath.Clean(item), id)
if err != nil {
// this should not happen
panic(fmt.Sprintf("error checking device ID of %v: %v", item, err))
}
if allowed {
// accept item
return false
}
// reject everything except directories
if !fi.IsDir() {
return true
}
// special case: make sure we keep mountpoints (directories which
// contain a mounted file system). Test this by checking if the parent
// directory would be included.
parentDir := filepath.Dir(filepath.Clean(item))
parentFI, err := fs.Lstat(parentDir)
if err != nil {
debug.Log("item %v: error running lstat() on parent directory: %v", item, err)
// if in doubt, reject
return true
}
parentDeviceID, err := fs.DeviceID(parentFI)
if err != nil {
debug.Log("item %v: getting device ID of parent directory: %v", item, err)
// if in doubt, reject
return true
}
parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID)
if err != nil {
debug.Log("item %v: error checking parent directory: %v", item, err)
// if in doubt, reject
return true
}
if parentAllowed {
// we found a mount point, so accept the directory
return false
}
// reject everything else
return true
}, nil
}
// rejectResticCache returns a RejectByNameFunc that rejects the restic cache
// directory (if set).
func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) {
if repo.Cache == nil {
return func(string) bool {
return false
}, nil
}
cacheBase := repo.Cache.BaseDir()
if cacheBase == "" {
return nil, errors.New("cacheBase is empty string")
}
return func(item string) bool {
if fs.HasPathPrefix(cacheBase, item) {
debug.Log("rejecting restic cache directory %v", item)
return true
}
return false
}, nil
}
func rejectBySize(maxSizeStr string) (RejectFunc, error) {
maxSize, err := ui.ParseBytes(maxSizeStr)
if err != nil {
return nil, err
}
return func(item string, fi os.FileInfo) bool {
// directory will be ignored
if fi.IsDir() {
return false
}
filesize := fi.Size()
if filesize > maxSize {
debug.Log("file %s is oversize: %d", item, filesize)
return true
}
return false
}, nil
}
// readExcludePatternsFromFiles reads all exclude files and returns the list of
// exclude patterns. For each line, leading and trailing white space is removed
// and comment lines are ignored. For each remaining pattern, environment
// variables are resolved. For adding a literal dollar sign ($), write $$ to
// the file.
func readExcludePatternsFromFiles(excludeFiles []string) ([]string, error) {
getenvOrDollar := func(s string) string {
if s == "$" {
return "$"
}
return os.Getenv(s)
}
var excludes []string
for _, filename := range excludeFiles {
err := func() (err error) {
data, err := textfile.Read(filename)
if err != nil {
return err
}
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
// ignore empty lines
if line == "" {
continue
}
// strip comments
if strings.HasPrefix(line, "#") {
continue
}
line = os.Expand(line, getenvOrDollar)
excludes = append(excludes, line)
}
return scanner.Err()
}()
if err != nil {
return nil, err
}
}
return excludes, nil
}
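// Illustrative sketch (not part of the original source, helper name is
// hypothetical): how the $$ escape is resolved. os.Expand treats "$" as a
// valid single-character variable name, so the mapping receives "$" for
// "$$" and returns it literally, while ordinary names are looked up in the
// environment.
func exampleExpandPattern(line string) string {
	return os.Expand(line, func(s string) string {
		if s == "$" {
			return "$" // "$$HOME" -> literal "$HOME"
		}
		return os.Getenv(s) // "$HOME" -> value of HOME
	})
}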
type excludePatternOptions struct {
Excludes []string
InsensitiveExcludes []string
ExcludeFiles []string
InsensitiveExcludeFiles []string
}
func initExcludePatternOptions(f *pflag.FlagSet, opts *excludePatternOptions) {
f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames")
f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns")
}
func (opts *excludePatternOptions) Empty() bool {
return len(opts.Excludes) == 0 && len(opts.InsensitiveExcludes) == 0 && len(opts.ExcludeFiles) == 0 && len(opts.InsensitiveExcludeFiles) == 0
}
func (opts excludePatternOptions) CollectPatterns() ([]RejectByNameFunc, error) {
var fs []RejectByNameFunc
// add patterns from file
if len(opts.ExcludeFiles) > 0 {
excludePatterns, err := readExcludePatternsFromFiles(opts.ExcludeFiles)
if err != nil {
return nil, err
}
if err := filter.ValidatePatterns(excludePatterns); err != nil {
return nil, errors.Fatalf("--exclude-file: %s", err)
}
opts.Excludes = append(opts.Excludes, excludePatterns...)
}
if len(opts.InsensitiveExcludeFiles) > 0 {
excludes, err := readExcludePatternsFromFiles(opts.InsensitiveExcludeFiles)
if err != nil {
return nil, err
}
if err := filter.ValidatePatterns(excludes); err != nil {
return nil, errors.Fatalf("--iexclude-file: %s", err)
}
opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...)
}
if len(opts.InsensitiveExcludes) > 0 {
if err := filter.ValidatePatterns(opts.InsensitiveExcludes); err != nil {
return nil, errors.Fatalf("--iexclude: %s", err)
}
fs = append(fs, rejectByInsensitivePattern(opts.InsensitiveExcludes))
}
if len(opts.Excludes) > 0 {
if err := filter.ValidatePatterns(opts.Excludes); err != nil {
return nil, errors.Fatalf("--exclude: %s", err)
}
fs = append(fs, rejectByPattern(opts.Excludes))
}
return fs, nil
}
| package main
import (
"os"
"path/filepath"
"testing"
"github.com/restic/restic/internal/test"
)
func TestRejectByPattern(t *testing.T) {
var tests = []struct {
filename string
reject bool
}{
{filename: "/home/user/foo.go", reject: true},
{filename: "/home/user/foo.c", reject: false},
{filename: "/home/user/foobar", reject: false},
{filename: "/home/user/foobar/x", reject: true},
{filename: "/home/user/README", reject: false},
{filename: "/home/user/README.md", reject: true},
}
patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
for _, tc := range tests {
t.Run("", func(t *testing.T) {
reject := rejectByPattern(patterns)
res := reject(tc.filename)
if res != tc.reject {
t.Fatalf("wrong result for filename %v: want %v, got %v",
tc.filename, tc.reject, res)
}
})
}
}
func TestRejectByInsensitivePattern(t *testing.T) {
var tests = []struct {
filename string
reject bool
}{
{filename: "/home/user/foo.GO", reject: true},
{filename: "/home/user/foo.c", reject: false},
{filename: "/home/user/foobar", reject: false},
{filename: "/home/user/FOObar/x", reject: true},
{filename: "/home/user/README", reject: false},
{filename: "/home/user/readme.md", reject: true},
}
patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
for _, tc := range tests {
t.Run("", func(t *testing.T) {
reject := rejectByInsensitivePattern(patterns)
res := reject(tc.filename)
if res != tc.reject {
t.Fatalf("wrong result for filename %v: want %v, got %v",
tc.filename, tc.reject, res)
}
})
}
}
func TestIsExcludedByFile(t *testing.T) {
const (
tagFilename = "CACHEDIR.TAG"
header = "Signature: 8a477f597d28d172789f06886806bc55"
)
tests := []struct {
name string
tagFile string
content string
want bool
}{
{"NoTagfile", "", "", false},
{"EmptyTagfile", tagFilename, "", true},
{"UnnamedTagFile", "", header, false},
{"WrongTagFile", "notatagfile", header, false},
{"IncorrectSig", tagFilename, header[1:], false},
{"ValidSig", tagFilename, header, true},
{"ValidPlusStuff", tagFilename, header + "foo", true},
{"ValidPlusNewlineAndStuff", tagFilename, header + "\nbar", true},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
tempDir := test.TempDir(t)
foo := filepath.Join(tempDir, "foo")
err := os.WriteFile(foo, []byte("foo"), 0666)
if err != nil {
t.Fatalf("could not write file: %v", err)
}
if tc.tagFile != "" {
tagFile := filepath.Join(tempDir, tc.tagFile)
err = os.WriteFile(tagFile, []byte(tc.content), 0666)
if err != nil {
t.Fatalf("could not write tagfile: %v", err)
}
}
h := header
if tc.content == "" {
h = ""
}
if got := isExcludedByFile(foo, tagFilename, h, nil); tc.want != got {
t.Fatalf("expected %v, got %v", tc.want, got)
}
})
}
}
// TestMultipleIsExcludedByFile is for testing that multiple instances of
// the --exclude-if-present parameter (or the shortcut --exclude-caches) do not
// cancel each other out. It was initially written to demonstrate a bug in
// rejectIfPresent.
func TestMultipleIsExcludedByFile(t *testing.T) {
tempDir := test.TempDir(t)
// Create some files in a temporary directory.
// Files in UPPERCASE will be used as exclusion triggers later on.
// We will test the inclusion later, so we add the expected value as
// a bool.
files := []struct {
path string
incl bool
}{
{"42", true},
// everything in foodir except the NOFOO tagfile
// should not be included.
{"foodir/NOFOO", true},
{"foodir/foo", false},
{"foodir/foosub/underfoo", false},
// everything in bardir except the NOBAR tagfile
// should not be included.
{"bardir/NOBAR", true},
{"bardir/bar", false},
{"bardir/barsub/underbar", false},
// everything in bazdir should be included.
{"bazdir/baz", true},
{"bazdir/bazsub/underbaz", true},
}
var errs []error
for _, f := range files {
// create directories first, then the file
p := filepath.Join(tempDir, filepath.FromSlash(f.path))
errs = append(errs, os.MkdirAll(filepath.Dir(p), 0700))
errs = append(errs, os.WriteFile(p, []byte(f.path), 0600))
}
test.OKs(t, errs) // see if anything went wrong during the creation
// create two rejection functions, one that tests for the NOFOO file
// and one for the NOBAR file
fooExclude, _ := rejectIfPresent("NOFOO")
barExclude, _ := rejectIfPresent("NOBAR")
// To mock the archiver scanning walk, we create a filepath.WalkFunc
// that tests against the two rejection functions and stores
// the result in a map which we can check later.
m := make(map[string]bool)
walk := func(p string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
excludedByFoo := fooExclude(p)
excludedByBar := barExclude(p)
excluded := excludedByFoo || excludedByBar
// the log message helps debugging in case the test fails
t.Logf("%q: %v || %v = %v", p, excludedByFoo, excludedByBar, excluded)
m[p] = !excluded
if excluded {
return filepath.SkipDir
}
return nil
}
// walk through the temporary directory and check the error
test.OK(t, filepath.Walk(tempDir, walk))
// compare whether the walk gave the expected values for the test cases
for _, f := range files {
p := filepath.Join(tempDir, filepath.FromSlash(f.path))
if m[p] != f.incl {
t.Errorf("inclusion status of %s is wrong: want %v, got %v", f.path, f.incl, m[p])
}
}
}
// TestIsExcludedByFileSize tests the --exclude-larger-than parameter
func TestIsExcludedByFileSize(t *testing.T) {
tempDir := test.TempDir(t)
// The maximum file size is set to 1k
maxSizeStr := "1k"
// Create some files in a temporary directory.
// Files in UPPERCASE exceed the size limit and are expected to be excluded.
// We will test the inclusion later, so we add the expected value as
// a bool.
files := []struct {
path string
size int64
incl bool
}{
{"42", 100, true},
// FOOLARGE exceeds the size limit and should be excluded;
// everything else in foodir should be included.
{"foodir/FOOLARGE", 2048, false},
{"foodir/foo", 1002, true},
{"foodir/foosub/underfoo", 100, true},
// BARLARGE exceeds the size limit and should be excluded;
// everything else in bardir should be included.
{"bardir/BARLARGE", 1030, false},
{"bardir/bar", 1000, true},
{"bardir/barsub/underbar", 500, true},
// everything in bazdir should be included.
{"bazdir/baz", 100, true},
{"bazdir/bazsub/underbaz", 200, true},
}
var errs []error
for _, f := range files {
// create directories first, then the file
p := filepath.Join(tempDir, filepath.FromSlash(f.path))
errs = append(errs, os.MkdirAll(filepath.Dir(p), 0700))
file, err := os.OpenFile(p, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
errs = append(errs, err)
if err == nil {
// create a file with given size
errs = append(errs, file.Truncate(f.size))
}
errs = append(errs, file.Close())
}
test.OKs(t, errs) // see if anything went wrong during the creation
// create rejection function
sizeExclude, _ := rejectBySize(maxSizeStr)
// To mock the archiver scanning walk, we create a filepath.WalkFunc
// that tests against the size rejection function and stores
// the result in a map which we can check later.
m := make(map[string]bool)
walk := func(p string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
excluded := sizeExclude(p, fi)
// the log message helps debugging in case the test fails
t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded)
m[p] = !excluded
return nil
}
// walk through the temporary directory and check the error
test.OK(t, filepath.Walk(tempDir, walk))
// compare whether the walk gave the expected values for the test cases
for _, f := range files {
p := filepath.Join(tempDir, filepath.FromSlash(f.path))
if m[p] != f.incl {
t.Errorf("inclusion status of %s is wrong: want %v, got %v", f.path, f.incl, m[p])
}
}
}
func TestDeviceMap(t *testing.T) {
deviceMap := DeviceMap{
filepath.FromSlash("/"): 1,
filepath.FromSlash("/usr/local"): 5,
}
var tests = []struct {
item string
deviceID uint64
allowed bool
}{
{"/root", 1, true},
{"/usr", 1, true},
{"/proc", 2, false},
{"/proc/1234", 2, false},
{"/usr", 3, false},
{"/usr/share", 3, false},
{"/usr/local", 5, true},
{"/usr/local/foobar", 5, true},
{"/usr/local/foobar/submount", 23, false},
{"/usr/local/foobar/submount/file", 23, false},
{"/usr/local/foobar/outhersubmount", 1, false},
{"/usr/local/foobar/outhersubmount/otherfile", 1, false},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID)
if err != nil {
t.Fatal(err)
}
if res != test.allowed {
t.Fatalf("wrong result returned by IsAllowed(%v): want %v, got %v", test.item, test.allowed, res)
}
})
}
}
| 215,462 |
Review the shared code context and generate a suite of unit tests for the functions in the shared code context, using the detected test framework and libraries.
Code context:
package main
import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/dump"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/spf13/cobra"
)
var cmdDump = &cobra.Command{
Use: "dump [flags] snapshotID file",
Short: "Print a backed-up file to stdout",
Long: `
The "dump" command extracts files from a snapshot from the repository. If a
single file is selected, it prints its contents to stdout. Folders are output
as a tar (default) or zip file containing the contents of the specified folder.
Pass "/" as file name to dump the whole snapshot as an archive file.
The special snapshotID "latest" can be used to use the latest snapshot in the
repository.
To include the folder content at the root of the archive, you can use the
"<snapshotID>:<subfolder>" syntax, where "subfolder" is a path within the
snapshot.
EXIT STATUS
===========
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runDump(cmd.Context(), dumpOptions, globalOptions, args)
},
}
// DumpOptions collects all options for the dump command.
type DumpOptions struct {
restic.SnapshotFilter
Archive string
}
var dumpOptions DumpOptions
func init() {
cmdRoot.AddCommand(cmdDump)
flags := cmdDump.Flags()
initSingleSnapshotFilter(flags, &dumpOptions.SnapshotFilter)
flags.StringVarP(&dumpOptions.Archive, "archive", "a", "tar", "set archive `format` as \"tar\" or \"zip\"")
}
func splitPath(p string) []string {
d, f := path.Split(p)
if d == "" || d == "/" {
return []string{f}
}
s := splitPath(path.Join("/", d))
return append(s, f)
}
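// Illustrative sketch (not part of the original source, helper name is
// hypothetical): splitPath recurses on the directory part, so
// "/test/dir/sub" unwinds as splitPath("/test/dir") plus "sub" and finally
// yields ["test", "dir", "sub"].
func exampleSplitPath() []string {
	return splitPath("/test/dir/sub") // ["test", "dir", "sub"]
}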
func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string, d *dump.Dumper) error {
// If we print / we need to assume that there are multiple nodes at that
// level in the tree.
if pathComponents[0] == "" {
if err := checkStdoutArchive(); err != nil {
return err
}
return d.DumpTree(ctx, tree, "/")
}
item := filepath.Join(prefix, pathComponents[0])
l := len(pathComponents)
for _, node := range tree.Nodes {
// If dumping something at the highest level, it will just take the
// first item it finds and dump that according to the switch case below.
if node.Name == pathComponents[0] {
switch {
case l == 1 && dump.IsFile(node):
return d.WriteNode(ctx, node)
case l > 1 && dump.IsDir(node):
subtree, err := restic.LoadTree(ctx, repo, *node.Subtree)
if err != nil {
return errors.Wrapf(err, "cannot load subtree for %q", item)
}
return printFromTree(ctx, subtree, repo, item, pathComponents[1:], d)
case dump.IsDir(node):
if err := checkStdoutArchive(); err != nil {
return err
}
subtree, err := restic.LoadTree(ctx, repo, *node.Subtree)
if err != nil {
return err
}
return d.DumpTree(ctx, subtree, item)
case l > 1:
return fmt.Errorf("%q should be a dir, but is a %q", item, node.Type)
case !dump.IsFile(node):
return fmt.Errorf("%q should be a file, but is a %q", item, node.Type)
}
}
}
return fmt.Errorf("path %q not found in snapshot", item)
}
func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []string) error {
if len(args) != 2 {
return errors.Fatal("no file and no snapshot ID specified")
}
switch opts.Archive {
case "tar", "zip":
default:
return fmt.Errorf("unknown archive format %q", opts.Archive)
}
snapshotIDString := args[0]
pathToPrint := args[1]
debug.Log("dump file %q from %q", pathToPrint, snapshotIDString)
splittedPath := splitPath(path.Clean(pathToPrint))
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
sn, subfolder, err := (&restic.SnapshotFilter{
Hosts: opts.Hosts,
Paths: opts.Paths,
Tags: opts.Tags,
}).FindLatest(ctx, repo, repo, snapshotIDString)
if err != nil {
return errors.Fatalf("failed to find snapshot: %v", err)
}
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
err = repo.LoadIndex(ctx, bar)
if err != nil {
return err
}
sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
if err != nil {
return err
}
tree, err := restic.LoadTree(ctx, repo, *sn.Tree)
if err != nil {
return errors.Fatalf("loading tree for snapshot %q failed: %v", snapshotIDString, err)
}
d := dump.New(opts.Archive, repo, os.Stdout)
err = printFromTree(ctx, tree, repo, "/", splittedPath, d)
if err != nil {
return errors.Fatalf("cannot dump file: %v", err)
}
return nil
}
func checkStdoutArchive() error {
if stdoutIsTerminal() {
return fmt.Errorf("stdout is the terminal, please redirect output")
}
return nil
}
| package main
import (
"testing"
rtest "github.com/restic/restic/internal/test"
)
func TestDumpSplitPath(t *testing.T) {
testPaths := []struct {
path string
result []string
}{
{"", []string{""}},
{"test", []string{"test"}},
{"test/dir", []string{"test", "dir"}},
{"test/dir/sub", []string{"test", "dir", "sub"}},
{"/", []string{""}},
{"/test", []string{"test"}},
{"/test/dir", []string{"test", "dir"}},
{"/test/dir/sub", []string{"test", "dir", "sub"}},
}
for _, path := range testPaths {
parts := splitPath(path.path)
rtest.Equals(t, path.result, parts)
}
}
| 215,463 |
Review the shared code context and generate a suite of unit tests for the functions in the shared code context, using the detected test framework and libraries.
Code context:
package main
import (
"os"
"github.com/restic/restic/internal/errors"
"github.com/spf13/pflag"
)
type secondaryRepoOptions struct {
password string
// from-repo options
Repo string
RepositoryFile string
PasswordFile string
PasswordCommand string
KeyHint string
// repo2 options
LegacyRepo string
LegacyRepositoryFile string
LegacyPasswordFile string
LegacyPasswordCommand string
LegacyKeyHint string
}
func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repoPrefix string, repoUsage string) {
f.StringVarP(&opts.LegacyRepo, "repo2", "", "", repoPrefix+" `repository` "+repoUsage+" (default: $RESTIC_REPOSITORY2)")
f.StringVarP(&opts.LegacyRepositoryFile, "repository-file2", "", "", "`file` from which to read the "+repoPrefix+" repository location "+repoUsage+" (default: $RESTIC_REPOSITORY_FILE2)")
f.StringVarP(&opts.LegacyPasswordFile, "password-file2", "", "", "`file` to read the "+repoPrefix+" repository password from (default: $RESTIC_PASSWORD_FILE2)")
f.StringVarP(&opts.LegacyKeyHint, "key-hint2", "", "", "key ID of key to try decrypting the "+repoPrefix+" repository first (default: $RESTIC_KEY_HINT2)")
f.StringVarP(&opts.LegacyPasswordCommand, "password-command2", "", "", "shell `command` to obtain the "+repoPrefix+" repository password from (default: $RESTIC_PASSWORD_COMMAND2)")
// hide repo2 options
_ = f.MarkDeprecated("repo2", "use --repo or --from-repo instead")
_ = f.MarkDeprecated("repository-file2", "use --repository-file or --from-repository-file instead")
_ = f.MarkHidden("password-file2")
_ = f.MarkHidden("key-hint2")
_ = f.MarkHidden("password-command2")
opts.LegacyRepo = os.Getenv("RESTIC_REPOSITORY2")
opts.LegacyRepositoryFile = os.Getenv("RESTIC_REPOSITORY_FILE2")
opts.LegacyPasswordFile = os.Getenv("RESTIC_PASSWORD_FILE2")
opts.LegacyKeyHint = os.Getenv("RESTIC_KEY_HINT2")
opts.LegacyPasswordCommand = os.Getenv("RESTIC_PASSWORD_COMMAND2")
f.StringVarP(&opts.Repo, "from-repo", "", "", "source `repository` "+repoUsage+" (default: $RESTIC_FROM_REPOSITORY)")
f.StringVarP(&opts.RepositoryFile, "from-repository-file", "", "", "`file` from which to read the source repository location "+repoUsage+" (default: $RESTIC_FROM_REPOSITORY_FILE)")
f.StringVarP(&opts.PasswordFile, "from-password-file", "", "", "`file` to read the source repository password from (default: $RESTIC_FROM_PASSWORD_FILE)")
f.StringVarP(&opts.KeyHint, "from-key-hint", "", "", "key ID of key to try decrypting the source repository first (default: $RESTIC_FROM_KEY_HINT)")
f.StringVarP(&opts.PasswordCommand, "from-password-command", "", "", "shell `command` to obtain the source repository password from (default: $RESTIC_FROM_PASSWORD_COMMAND)")
opts.Repo = os.Getenv("RESTIC_FROM_REPOSITORY")
opts.RepositoryFile = os.Getenv("RESTIC_FROM_REPOSITORY_FILE")
opts.PasswordFile = os.Getenv("RESTIC_FROM_PASSWORD_FILE")
opts.KeyHint = os.Getenv("RESTIC_FROM_KEY_HINT")
opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND")
}
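// Editor's note (not part of the original source): the flags above are
// registered with empty defaults and the option fields are then pre-filled
// from the environment, so the RESTIC_*2 and RESTIC_FROM_* variables act as
// defaults that explicitly passed command-line flags override during flag
// parsing.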
func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) {
if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && opts.LegacyRepositoryFile == "" {
return GlobalOptions{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)")
}
hasFromRepo := opts.Repo != "" || opts.RepositoryFile != "" || opts.PasswordFile != "" ||
opts.KeyHint != "" || opts.PasswordCommand != ""
hasRepo2 := opts.LegacyRepo != "" || opts.LegacyRepositoryFile != "" || opts.LegacyPasswordFile != "" ||
opts.LegacyKeyHint != "" || opts.LegacyPasswordCommand != ""
if hasFromRepo && hasRepo2 {
return GlobalOptions{}, false, errors.Fatal("Option groups repo2 and from-repo are mutually exclusive, please specify only one")
}
var err error
dstGopts := gopts
var pwdEnv string
if hasFromRepo {
if opts.Repo != "" && opts.RepositoryFile != "" {
return GlobalOptions{}, false, errors.Fatal("Options --from-repo and --from-repository-file are mutually exclusive, please specify only one")
}
dstGopts.Repo = opts.Repo
dstGopts.RepositoryFile = opts.RepositoryFile
dstGopts.PasswordFile = opts.PasswordFile
dstGopts.PasswordCommand = opts.PasswordCommand
dstGopts.KeyHint = opts.KeyHint
pwdEnv = "RESTIC_FROM_PASSWORD"
repoPrefix = "source"
} else {
if opts.LegacyRepo != "" && opts.LegacyRepositoryFile != "" {
return GlobalOptions{}, false, errors.Fatal("Options --repo2 and --repository-file2 are mutually exclusive, please specify only one")
}
dstGopts.Repo = opts.LegacyRepo
dstGopts.RepositoryFile = opts.LegacyRepositoryFile
dstGopts.PasswordFile = opts.LegacyPasswordFile
dstGopts.PasswordCommand = opts.LegacyPasswordCommand
dstGopts.KeyHint = opts.LegacyKeyHint
pwdEnv = "RESTIC_PASSWORD2"
}
if opts.password != "" {
dstGopts.password = opts.password
} else {
dstGopts.password, err = resolvePassword(dstGopts, pwdEnv)
if err != nil {
return GlobalOptions{}, false, err
}
}
dstGopts.password, err = ReadPassword(dstGopts, "enter password for "+repoPrefix+" repository: ")
if err != nil {
return GlobalOptions{}, false, err
}
return dstGopts, hasFromRepo, nil
}
| package main
import (
"os"
"path/filepath"
"testing"
rtest "github.com/restic/restic/internal/test"
)
// TestFillSecondaryGlobalOpts tests the fillSecondaryGlobalOpts function with valid and invalid data
func TestFillSecondaryGlobalOpts(t *testing.T) {
// secondaryRepoTestCase defines a struct for test cases
type secondaryRepoTestCase struct {
Opts secondaryRepoOptions
DstGOpts GlobalOptions
FromRepo bool
}
// validSecondaryRepoTestCases is a list of test cases that must pass
var validSecondaryRepoTestCases = []secondaryRepoTestCase{
{
// Test if Repo and Password are parsed correctly.
Opts: secondaryRepoOptions{
Repo: "backupDst",
password: "secretDst",
},
DstGOpts: GlobalOptions{
Repo: "backupDst",
password: "secretDst",
},
FromRepo: true,
},
{
// Test if RepositoryFile and PasswordFile are parsed correctly.
Opts: secondaryRepoOptions{
RepositoryFile: "backupDst",
PasswordFile: "passwordFileDst",
},
DstGOpts: GlobalOptions{
RepositoryFile: "backupDst",
password: "secretDst",
PasswordFile: "passwordFileDst",
},
FromRepo: true,
},
{
// Test if RepositoryFile and PasswordCommand are parsed correctly.
Opts: secondaryRepoOptions{
RepositoryFile: "backupDst",
PasswordCommand: "echo secretDst",
},
DstGOpts: GlobalOptions{
RepositoryFile: "backupDst",
password: "secretDst",
PasswordCommand: "echo secretDst",
},
FromRepo: true,
},
{
// Test if LegacyRepo and Password are parsed correctly.
Opts: secondaryRepoOptions{
LegacyRepo: "backupDst",
password: "secretDst",
},
DstGOpts: GlobalOptions{
Repo: "backupDst",
password: "secretDst",
},
},
{
// Test if LegacyRepositoryFile and LegacyPasswordFile are parsed correctly.
Opts: secondaryRepoOptions{
LegacyRepositoryFile: "backupDst",
LegacyPasswordFile: "passwordFileDst",
},
DstGOpts: GlobalOptions{
RepositoryFile: "backupDst",
password: "secretDst",
PasswordFile: "passwordFileDst",
},
},
{
// Test if LegacyRepositoryFile and LegacyPasswordCommand are parsed correctly.
Opts: secondaryRepoOptions{
LegacyRepositoryFile: "backupDst",
LegacyPasswordCommand: "echo secretDst",
},
DstGOpts: GlobalOptions{
RepositoryFile: "backupDst",
password: "secretDst",
PasswordCommand: "echo secretDst",
},
},
}
// invalidSecondaryRepoTestCases is a list of test cases that must fail
var invalidSecondaryRepoTestCases = []secondaryRepoTestCase{
{
// Test must fail on no repo given.
Opts: secondaryRepoOptions{},
},
{
// Test must fail as Repo and RepositoryFile are both given
Opts: secondaryRepoOptions{
Repo: "backupDst",
RepositoryFile: "backupDst",
},
},
{
// Test must fail as PasswordFile and PasswordCommand are both given
Opts: secondaryRepoOptions{
Repo: "backupDst",
PasswordFile: "passwordFileDst",
PasswordCommand: "notEmpty",
},
},
{
// Test must fail as PasswordFile does not exist
Opts: secondaryRepoOptions{
Repo: "backupDst",
PasswordFile: "NonExistingFile",
},
},
{
// Test must fail as PasswordCommand does not exist
Opts: secondaryRepoOptions{
Repo: "backupDst",
PasswordCommand: "notEmpty",
},
},
{
// Test must fail as no password is given.
Opts: secondaryRepoOptions{
Repo: "backupDst",
},
},
{
// Test must fail as current and legacy options are mixed
Opts: secondaryRepoOptions{
Repo: "backupDst",
LegacyRepo: "backupDst",
},
},
{
// Test must fail as current and legacy options are mixed
Opts: secondaryRepoOptions{
Repo: "backupDst",
LegacyPasswordCommand: "notEmpty",
},
},
}
// gOpts defines the global options used in the secondary repository tests
var gOpts = GlobalOptions{
Repo: "backupSrc",
RepositoryFile: "backupSrc",
password: "secretSrc",
PasswordFile: "passwordFileSrc",
}
// Create a temp dir to hold the password file.
dir := rtest.TempDir(t)
cleanup := rtest.Chdir(t, dir)
defer cleanup()
// Create a temporary password file
err := os.WriteFile(filepath.Join(dir, "passwordFileDst"), []byte("secretDst"), 0666)
rtest.OK(t, err)
// Test all valid cases
for _, testCase := range validSecondaryRepoTestCases {
DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
rtest.OK(t, err)
rtest.Equals(t, DstGOpts, testCase.DstGOpts)
rtest.Equals(t, isFromRepo, testCase.FromRepo)
}
// Test all invalid cases
for _, testCase := range invalidSecondaryRepoTestCases {
_, _, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
rtest.Assert(t, err != nil, "Expected error, but function did not return an error")
}
}
| 215,464 |
Review the shared code context and generate a suite of unit tests for the functions in the shared code context, using the detected test framework and libraries.
Code context:
package main
import (
"context"
"encoding/json"
"strings"
"github.com/spf13/cobra"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
)
var cmdCat = &cobra.Command{
Use: "cat [flags] [masterkey|config|pack ID|blob ID|snapshot ID|index ID|key ID|lock ID|tree snapshot:subfolder]",
Short: "Print internal objects to stdout",
Long: `
The "cat" command is used to print internal objects to stdout.
EXIT STATUS
===========
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runCat(cmd.Context(), globalOptions, args)
},
}
func init() {
cmdRoot.AddCommand(cmdCat)
}
func validateCatArgs(args []string) error {
var allowedCmds = []string{"config", "index", "snapshot", "key", "masterkey", "lock", "pack", "blob", "tree"}
if len(args) < 1 {
return errors.Fatal("type not specified")
}
validType := false
for _, v := range allowedCmds {
if v == args[0] {
validType = true
break
}
}
if !validType {
return errors.Fatalf("invalid type %q, must be one of [%s]", args[0], strings.Join(allowedCmds, "|"))
}
if args[0] != "masterkey" && args[0] != "config" && len(args) != 2 {
return errors.Fatal("ID not specified")
}
return nil
}
func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
if err := validateCatArgs(args); err != nil {
return err
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
tpe := args[0]
var id restic.ID
if tpe != "masterkey" && tpe != "config" && tpe != "snapshot" && tpe != "tree" {
id, err = restic.ParseID(args[1])
if err != nil {
return errors.Fatalf("unable to parse ID: %v\n", err)
}
}
switch tpe {
case "config":
buf, err := json.MarshalIndent(repo.Config(), "", " ")
if err != nil {
return err
}
Println(string(buf))
return nil
case "index":
buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
if err != nil {
return err
}
Println(string(buf))
return nil
case "snapshot":
sn, _, err := restic.FindSnapshot(ctx, repo, repo, args[1])
if err != nil {
return errors.Fatalf("could not find snapshot: %v\n", err)
}
buf, err := json.MarshalIndent(sn, "", " ")
if err != nil {
return err
}
Println(string(buf))
return nil
case "key":
key, err := repository.LoadKey(ctx, repo, id)
if err != nil {
return err
}
buf, err := json.MarshalIndent(&key, "", " ")
if err != nil {
return err
}
Println(string(buf))
return nil
case "masterkey":
buf, err := json.MarshalIndent(repo.Key(), "", " ")
if err != nil {
return err
}
Println(string(buf))
return nil
case "lock":
lock, err := restic.LoadLock(ctx, repo, id)
if err != nil {
return err
}
buf, err := json.MarshalIndent(&lock, "", " ")
if err != nil {
return err
}
Println(string(buf))
return nil
case "pack":
h := backend.Handle{Type: restic.PackFile, Name: id.String()}
buf, err := backend.LoadAll(ctx, nil, repo.Backend(), h)
if err != nil {
return err
}
hash := restic.Hash(buf)
if !hash.Equal(id) {
Warnf("Warning: hash of data does not match ID, want\n %v\ngot:\n %v\n", id.String(), hash.String())
}
_, err = globalOptions.stdout.Write(buf)
return err
case "blob":
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
err = repo.LoadIndex(ctx, bar)
if err != nil {
return err
}
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
bh := restic.BlobHandle{ID: id, Type: t}
if !repo.Index().Has(bh) {
continue
}
buf, err := repo.LoadBlob(ctx, t, id, nil)
if err != nil {
return err
}
_, err = globalOptions.stdout.Write(buf)
return err
}
return errors.Fatal("blob not found")
case "tree":
sn, subfolder, err := restic.FindSnapshot(ctx, repo, repo, args[1])
if err != nil {
return errors.Fatalf("could not find snapshot: %v\n", err)
}
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
err = repo.LoadIndex(ctx, bar)
if err != nil {
return err
}
sn.Tree, err = restic.FindTreeDirectory(ctx, repo, sn.Tree, subfolder)
if err != nil {
return err
}
buf, err := repo.LoadBlob(ctx, restic.TreeBlob, *sn.Tree, nil)
if err != nil {
return err
}
_, err = globalOptions.stdout.Write(buf)
return err
default:
return errors.Fatal("invalid type")
}
}
| package main
import (
"strings"
"testing"
rtest "github.com/restic/restic/internal/test"
)
func TestCatArgsValidation(t *testing.T) {
for _, test := range []struct {
args []string
err string
}{
{[]string{}, "Fatal: type not specified"},
{[]string{"masterkey"}, ""},
{[]string{"invalid"}, `Fatal: invalid type "invalid"`},
{[]string{"snapshot"}, "Fatal: ID not specified"},
{[]string{"snapshot", "12345678"}, ""},
} {
t.Run("", func(t *testing.T) {
err := validateCatArgs(test.args)
if test.err == "" {
rtest.Assert(t, err == nil, "unexpected error %q", err)
} else {
rtest.Assert(t, strings.Contains(err.Error(), test.err), "unexpected error expected %q to contain %q", err, test.err)
}
})
}
}
| 215,465 |
Review the shared code context and generate a suite of unit tests for the functions in the shared code context, using the detected test framework and libraries.
Code context:
package ui
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math/bits"
"strconv"
"time"
)
func FormatBytes(c uint64) string {
b := float64(c)
switch {
case c >= 1<<40:
return fmt.Sprintf("%.3f TiB", b/(1<<40))
case c >= 1<<30:
return fmt.Sprintf("%.3f GiB", b/(1<<30))
case c >= 1<<20:
return fmt.Sprintf("%.3f MiB", b/(1<<20))
case c >= 1<<10:
return fmt.Sprintf("%.3f KiB", b/(1<<10))
default:
return fmt.Sprintf("%d B", c)
}
}
// FormatPercent formats numerator/denominator as a percentage.
func FormatPercent(numerator uint64, denominator uint64) string {
if denominator == 0 {
return ""
}
percent := 100.0 * float64(numerator) / float64(denominator)
if percent > 100 {
percent = 100
}
return fmt.Sprintf("%3.2f%%", percent)
}
// FormatDuration formats d as FormatSeconds would.
func FormatDuration(d time.Duration) string {
sec := uint64(d / time.Second)
return FormatSeconds(sec)
}
// FormatSeconds formats sec as MM:SS, or HH:MM:SS if sec seconds
// is at least an hour.
func FormatSeconds(sec uint64) string {
hours := sec / 3600
sec -= hours * 3600
min := sec / 60
sec -= min * 60
if hours > 0 {
return fmt.Sprintf("%d:%02d:%02d", hours, min, sec)
}
return fmt.Sprintf("%d:%02d", min, sec)
}
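// Illustrative sketch (not part of the original source, helper name is
// hypothetical): sample renderings. FormatSeconds switches to an hour field
// at 3600 seconds, and FormatPercent yields the empty string for an unknown
// denominator.
func exampleFormatting() {
	fmt.Println(FormatSeconds(125))     // "2:05"
	fmt.Println(FormatSeconds(3725))    // "1:02:05"
	fmt.Println(FormatPercent(50, 200)) // "25.00%"
	fmt.Println(FormatPercent(7, 0))    // ""
}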
// ParseBytes parses a size in bytes from s. It understands the suffixes
// B, K, M, G and T for powers of 1024.
func ParseBytes(s string) (int64, error) {
if s == "" {
return 0, errors.New("expected size, got empty string")
}
numStr := s[:len(s)-1]
var unit uint64 = 1
switch s[len(s)-1] {
case 'b', 'B':
// use initialized values, do nothing here
case 'k', 'K':
unit = 1024
case 'm', 'M':
unit = 1024 * 1024
case 'g', 'G':
unit = 1024 * 1024 * 1024
case 't', 'T':
unit = 1024 * 1024 * 1024 * 1024
default:
numStr = s
}
value, err := strconv.ParseInt(numStr, 10, 64)
if err != nil {
return 0, err
}
hi, lo := bits.Mul64(uint64(value), unit)
value = int64(lo)
if hi != 0 || value < 0 {
return 0, fmt.Errorf("ParseSize: %q: %w", numStr, strconv.ErrRange)
}
return value, nil
}
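// Illustrative sketch (not part of the original source, helper name is
// hypothetical): round-tripping a few values through ParseBytes and
// FormatBytes. The two are not exact inverses: ParseBytes reads whole
// multiples of 1024 ("1k" == 1024), while FormatBytes prints three decimal
// places.
func exampleParseAndFormat() {
	for _, s := range []string{"42", "42b", "1k", "10M", "5G", "2T"} {
		v, err := ParseBytes(s)
		if err != nil {
			fmt.Printf("%q: %v\n", s, err)
			continue
		}
		fmt.Printf("%q -> %d -> %s\n", s, v, FormatBytes(uint64(v)))
	}
}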
func ToJSONString(status interface{}) string {
buf := new(bytes.Buffer)
err := json.NewEncoder(buf).Encode(status)
if err != nil {
panic(err)
}
return buf.String()
}
| package main
import (
"testing"
"time"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func TestFormatNode(t *testing.T) {
// overwrite time zone to ensure the data is formatted reproducibly
tz := time.Local
time.Local = time.UTC
defer func() {
time.Local = tz
}()
testPath := "/test/path"
node := restic.Node{
Name: "baz",
Type: "file",
Size: 14680064,
UID: 1000,
GID: 2000,
ModTime: time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC),
}
for _, c := range []struct {
path string
restic.Node
long bool
human bool
expect string
}{
{
path: testPath,
Node: node,
long: false,
human: false,
expect: testPath,
},
{
path: testPath,
Node: node,
long: true,
human: false,
expect: "---------- 1000 2000 14680064 2020-01-02 03:04:05 " + testPath,
},
{
path: testPath,
Node: node,
long: true,
human: true,
expect: "---------- 1000 2000 14.000 MiB 2020-01-02 03:04:05 " + testPath,
},
} {
r := formatNode(c.path, &c.Node, c.long, c.human)
rtest.Equals(t, c.expect, r)
}
}
| 215,466 |
Review the shared code context and generate a suite of unit tests for the functions in the shared code context, using the detected test framework and libraries.
Code context:
package main
import (
"fmt"
"os"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui"
)
func formatNode(path string, n *restic.Node, long bool, human bool) string {
if !long {
return path
}
var mode os.FileMode
var target string
var size string
if human {
size = ui.FormatBytes(n.Size)
} else {
size = fmt.Sprintf("%6d", n.Size)
}
switch n.Type {
case "file":
mode = 0
case "dir":
mode = os.ModeDir
case "symlink":
mode = os.ModeSymlink
target = fmt.Sprintf(" -> %v", n.LinkTarget)
case "dev":
mode = os.ModeDevice
case "chardev":
mode = os.ModeDevice | os.ModeCharDevice
case "fifo":
mode = os.ModeNamedPipe
case "socket":
mode = os.ModeSocket
}
return fmt.Sprintf("%s %5d %5d %s %s %s%s",
mode|n.Mode, n.UID, n.GID, size,
n.ModTime.Local().Format(TimeFormat), path,
target)
}
| package main
import (
"testing"
"time"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func TestFormatNode(t *testing.T) {
// overwrite time zone to ensure the data is formatted reproducibly
tz := time.Local
time.Local = time.UTC
defer func() {
time.Local = tz
}()
testPath := "/test/path"
node := restic.Node{
Name: "baz",
Type: "file",
Size: 14680064,
UID: 1000,
GID: 2000,
ModTime: time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC),
}
for _, c := range []struct {
path string
restic.Node
long bool
human bool
expect string
}{
{
path: testPath,
Node: node,
long: false,
human: false,
expect: testPath,
},
{
path: testPath,
Node: node,
long: true,
human: false,
expect: "---------- 1000 2000 14680064 2020-01-02 03:04:05 " + testPath,
},
{
path: testPath,
Node: node,
long: true,
human: true,
expect: "---------- 1000 2000 14.000 MiB 2020-01-02 03:04:05 " + testPath,
},
} {
r := formatNode(c.path, &c.Node, c.long, c.human)
rtest.Equals(t, c.expect, r)
}
}
| 215,467 |
Review the shared code context and generate a suite of unit tests for the functions in the shared code context, using the detected test framework and libraries.
Code context:
package main
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/textfile"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/backup"
"github.com/restic/restic/internal/ui/termstatus"
)
var cmdBackup = &cobra.Command{
Use: "backup [flags] [FILE/DIR] ...",
Short: "Create a new backup of files and/or directories",
Long: `
The "backup" command creates a new snapshot and saves the files and directories
given as the arguments.
EXIT STATUS
===========
Exit status is 0 if the command was successful.
Exit status is 1 if there was a fatal error (no snapshot created).
Exit status is 3 if some source data could not be read (incomplete snapshot created).
`,
PreRun: func(cmd *cobra.Command, args []string) {
if backupOptions.Host == "" {
hostname, err := os.Hostname()
if err != nil {
debug.Log("os.Hostname() returned err: %v", err)
return
}
backupOptions.Host = hostname
}
},
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
var wg sync.WaitGroup
cancelCtx, cancel := context.WithCancel(ctx)
defer func() {
// shutdown termstatus
cancel()
wg.Wait()
}()
term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
wg.Add(1)
go func() {
defer wg.Done()
term.Run(cancelCtx)
}()
// use the terminal for stdout/stderr
prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
defer func() {
globalOptions.stdout, globalOptions.stderr = prevStdout, prevStderr
}()
stdioWrapper := ui.NewStdioWrapper(term)
globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
return runBackup(ctx, backupOptions, globalOptions, term, args)
},
}
// BackupOptions bundles all options for the backup command.
type BackupOptions struct {
excludePatternOptions
Parent string
GroupBy restic.SnapshotGroupByOptions
Force bool
ExcludeOtherFS bool
ExcludeIfPresent []string
ExcludeCaches bool
ExcludeLargerThan string
Stdin bool
StdinFilename string
StdinCommand bool
Tags restic.TagLists
Host string
FilesFrom []string
FilesFromVerbatim []string
FilesFromRaw []string
TimeStamp string
WithAtime bool
IgnoreInode bool
IgnoreCtime bool
UseFsSnapshot bool
DryRun bool
ReadConcurrency uint
NoScan bool
}
var backupOptions BackupOptions
// ErrInvalidSourceData is used to report an incomplete backup
var ErrInvalidSourceData = errors.New("at least one source file could not be read")
func init() {
cmdRoot.AddCommand(cmdBackup)
f := cmdBackup.Flags()
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent `snapshot` (default: latest snapshot in the group determined by --group-by and not newer than the timestamp determined by --time)")
backupOptions.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
initExcludePatternOptions(f, &backupOptions.excludePatternOptions)
f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems, don't cross filesystem boundaries and subvolumes")
f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes `filename[:header]`, exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file. See https://bford.info/cachedir/ for the Cache Directory Tagging Standard`)
f.StringVar(&backupOptions.ExcludeLargerThan, "exclude-larger-than", "", "max `size` of the files to be backed up (allowed suffixes: k/K, m/M, g/G, t/T)")
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "`filename` to use when reading from stdin")
f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "execute command and store its stdout")
f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)")
f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)")
f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually")
err := f.MarkDeprecated("hostname", "use --host")
if err != nil {
// MarkDeprecated only returns an error when the flag could not be found
panic(err)
}
f.StringArrayVar(&backupOptions.FilesFrom, "files-from", nil, "read the files to backup from `file` (can be combined with file args; can be specified multiple times)")
f.StringArrayVar(&backupOptions.FilesFromVerbatim, "files-from-verbatim", nil, "read the files to backup from `file` (can be combined with file args; can be specified multiple times)")
f.StringArrayVar(&backupOptions.FilesFromRaw, "files-from-raw", nil, "read the files to backup from `file` (can be combined with file args; can be specified multiple times)")
f.StringVar(&backupOptions.TimeStamp, "time", "", "`time` of the backup (ex. '2012-11-01 22:08:41') (default: now)")
f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories")
f.BoolVar(&backupOptions.IgnoreInode, "ignore-inode", false, "ignore inode number changes when checking for modified files")
f.BoolVar(&backupOptions.IgnoreCtime, "ignore-ctime", false, "ignore ctime changes when checking for modified files")
f.BoolVarP(&backupOptions.DryRun, "dry-run", "n", false, "do not upload or write any data, just show what would be done")
f.BoolVar(&backupOptions.NoScan, "no-scan", false, "do not run scanner to estimate size of backup")
if runtime.GOOS == "windows" {
f.BoolVar(&backupOptions.UseFsSnapshot, "use-fs-snapshot", false, "use filesystem snapshot where possible (currently only Windows VSS)")
}
// parse read concurrency from env; on error the default value will be used
readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32)
backupOptions.ReadConcurrency = uint(readConcurrency)
}
// filterExisting returns a slice of all existing items, or an error if no
// items exist at all.
func filterExisting(items []string) (result []string, err error) {
for _, item := range items {
_, err := fs.Lstat(item)
if errors.Is(err, os.ErrNotExist) {
Warnf("%v does not exist, skipping\n", item)
continue
}
result = append(result, item)
}
if len(result) == 0 {
return nil, errors.Fatal("all target directories/files do not exist")
}
return
}
// readLines reads all lines from the named file and returns them as a
// string slice.
//
// If filename is empty, readLines returns an empty slice.
// If filename is a dash (-), readLines will read the lines from the
// standard input.
func readLines(filename string) ([]string, error) {
if filename == "" {
return nil, nil
}
var (
data []byte
err error
)
if filename == "-" {
data, err = io.ReadAll(os.Stdin)
} else {
data, err = textfile.Read(filename)
}
if err != nil {
return nil, err
}
var lines []string
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
if err := scanner.Err(); err != nil {
return nil, err
}
return lines, nil
}
// readFilenamesFromFileRaw reads a list of filenames from the given file,
// or stdin if filename is "-". Each filename is terminated by a zero byte,
// which is stripped off.
func readFilenamesFromFileRaw(filename string) (names []string, err error) {
f := os.Stdin
if filename != "-" {
if f, err = os.Open(filename); err != nil {
return nil, err
}
}
names, err = readFilenamesRaw(f)
if err != nil {
// ignore subsequent errors
_ = f.Close()
return nil, err
}
err = f.Close()
if err != nil {
return nil, err
}
return names, nil
}
func readFilenamesRaw(r io.Reader) (names []string, err error) {
br := bufio.NewReader(r)
for {
name, err := br.ReadString(0)
switch err {
case nil:
case io.EOF:
if name == "" {
return names, nil
}
return nil, errors.Fatal("--files-from-raw: trailing zero byte missing")
default:
return nil, err
}
name = name[:len(name)-1]
if name == "" {
// The empty filename is never valid. Handle this now to
// prevent downstream code from erroneously backing up
// filepath.Clean("") == ".".
return nil, errors.Fatal("--files-from-raw: empty filename in listing")
}
names = append(names, name)
}
}
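// Usage sketch: the zero-terminated format pairs with tools that emit
// NUL-separated lists, such as find's -print0 (shell example, not Go):
//
//	find /home -name '*.go' -print0 | restic backup --files-from-raw -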
// Check returns an error when an invalid combination of options was set.
func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
if gopts.password == "" {
if opts.Stdin {
return errors.Fatal("cannot read both password and data from stdin")
}
filesFrom := append(append(opts.FilesFrom, opts.FilesFromVerbatim...), opts.FilesFromRaw...)
for _, filename := range filesFrom {
if filename == "-" {
return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD")
}
}
}
if opts.Stdin || opts.StdinCommand {
if len(opts.FilesFrom) > 0 {
return errors.Fatal("--stdin and --files-from cannot be used together")
}
if len(opts.FilesFromVerbatim) > 0 {
return errors.Fatal("--stdin and --files-from-verbatim cannot be used together")
}
if len(opts.FilesFromRaw) > 0 {
return errors.Fatal("--stdin and --files-from-raw cannot be used together")
}
if len(args) > 0 && !opts.StdinCommand {
return errors.Fatal("--stdin was specified and files/dirs were listed as arguments")
}
}
return nil
}
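// A minimal sketch of Check's behavior (hypothetical values; password is an
// unexported field, so this only compiles inside this package):
//
//	opts := BackupOptions{Stdin: true, FilesFrom: []string{"list.txt"}}
//	err := opts.Check(GlobalOptions{password: "pw"}, nil)
//	// err: "--stdin and --files-from cannot be used together"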
// collectRejectByNameFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path only
func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []RejectByNameFunc, err error) {
// exclude restic cache
if repo.Cache != nil {
f, err := rejectResticCache(repo)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
fsPatterns, err := opts.excludePatternOptions.CollectPatterns()
if err != nil {
return nil, err
}
fs = append(fs, fsPatterns...)
if opts.ExcludeCaches {
opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55")
}
for _, spec := range opts.ExcludeIfPresent {
f, err := rejectIfPresent(spec)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
return fs, nil
}
// collectRejectFuncs returns a list of all functions which may reject data
// from being saved in a snapshot based on path and file info
func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, err error) {
// allowed devices
if opts.ExcludeOtherFS && !opts.Stdin {
f, err := rejectByDevice(targets)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin {
f, err := rejectBySize(opts.ExcludeLargerThan)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
return fs, nil
}
// collectTargets returns a list of target files/dirs from several sources.
func collectTargets(opts BackupOptions, args []string) (targets []string, err error) {
if opts.Stdin || opts.StdinCommand {
return nil, nil
}
for _, file := range opts.FilesFrom {
fromfile, err := readLines(file)
if err != nil {
return nil, err
}
// expand wildcards
for _, line := range fromfile {
line = strings.TrimSpace(line)
if line == "" || line[0] == '#' { // '#' marks a comment.
continue
}
var expanded []string
expanded, err := filepath.Glob(line)
if err != nil {
return nil, fmt.Errorf("pattern: %s: %w", line, err)
}
if len(expanded) == 0 {
Warnf("pattern %q does not match any files, skipping\n", line)
}
targets = append(targets, expanded...)
}
}
for _, file := range opts.FilesFromVerbatim {
fromfile, err := readLines(file)
if err != nil {
return nil, err
}
for _, line := range fromfile {
if line == "" {
continue
}
targets = append(targets, line)
}
}
for _, file := range opts.FilesFromRaw {
fromfile, err := readFilenamesFromFileRaw(file)
if err != nil {
return nil, err
}
targets = append(targets, fromfile...)
}
// Merge args into files-from so we can reuse the normal args checks
// and have the ability to use both files-from and args at the same time.
targets = append(targets, args...)
if len(targets) == 0 && !opts.Stdin {
return nil, errors.Fatal("nothing to backup, please specify target files/dirs")
}
targets, err = filterExisting(targets)
if err != nil {
return nil, err
}
return targets, nil
}
// findParentSnapshot returns the parent snapshot. If there is none, nil is
// returned.
func findParentSnapshot(ctx context.Context, repo restic.Repository, opts BackupOptions, targets []string, timeStampLimit time.Time) (*restic.Snapshot, error) {
if opts.Force {
return nil, nil
}
snName := opts.Parent
if snName == "" {
snName = "latest"
}
f := restic.SnapshotFilter{TimestampLimit: timeStampLimit}
if opts.GroupBy.Host {
f.Hosts = []string{opts.Host}
}
if opts.GroupBy.Path {
f.Paths = targets
}
if opts.GroupBy.Tag {
f.Tags = []restic.TagList{opts.Tags.Flatten()}
}
sn, _, err := f.FindLatest(ctx, repo, repo, snName)
// Snapshot not found is ok if no explicit parent was set
if opts.Parent == "" && errors.Is(err, restic.ErrNoSnapshotFound) {
err = nil
}
return sn, err
}
func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
err := opts.Check(gopts, args)
if err != nil {
return err
}
targets, err := collectTargets(opts, args)
if err != nil {
return err
}
timeStamp := time.Now()
if opts.TimeStamp != "" {
timeStamp, err = time.ParseInLocation(TimeFormat, opts.TimeStamp, time.Local)
if err != nil {
return errors.Fatalf("error in time option: %v\n", err)
}
}
if gopts.verbosity >= 2 && !gopts.JSON {
Verbosef("open repository\n")
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
var progressPrinter backup.ProgressPrinter
if gopts.JSON {
progressPrinter = backup.NewJSONProgress(term, gopts.verbosity)
} else {
progressPrinter = backup.NewTextProgress(term, gopts.verbosity)
}
progressReporter := backup.NewProgress(progressPrinter,
calculateProgressInterval(!gopts.Quiet, gopts.JSON))
defer progressReporter.Done()
if opts.DryRun {
repo.SetDryRun()
}
if !gopts.JSON {
progressPrinter.V("lock repository")
}
if !opts.DryRun {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
// rejectByNameFuncs collect functions that can reject items from the backup based on path only
rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo)
if err != nil {
return err
}
// rejectFuncs collect functions that can reject items from the backup based on path and file info
rejectFuncs, err := collectRejectFuncs(opts, targets)
if err != nil {
return err
}
var parentSnapshot *restic.Snapshot
if !opts.Stdin {
parentSnapshot, err = findParentSnapshot(ctx, repo, opts, targets, timeStamp)
if err != nil {
return err
}
if !gopts.JSON {
if parentSnapshot != nil {
progressPrinter.P("using parent snapshot %v\n", parentSnapshot.ID().Str())
} else {
progressPrinter.P("no parent snapshot found, will read all files\n")
}
}
}
if !gopts.JSON {
progressPrinter.V("load index files")
}
bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term)
err = repo.LoadIndex(ctx, bar)
if err != nil {
return err
}
selectByNameFilter := func(item string) bool {
for _, reject := range rejectByNameFuncs {
if reject(item) {
return false
}
}
return true
}
selectFilter := func(item string, fi os.FileInfo) bool {
for _, reject := range rejectFuncs {
if reject(item, fi) {
return false
}
}
return true
}
var targetFS fs.FS = fs.Local{}
if runtime.GOOS == "windows" && opts.UseFsSnapshot {
if err = fs.HasSufficientPrivilegesForVSS(); err != nil {
return err
}
errorHandler := func(item string, err error) error {
return progressReporter.Error(item, err)
}
messageHandler := func(msg string, args ...interface{}) {
if !gopts.JSON {
progressPrinter.P(msg, args...)
}
}
localVss := fs.NewLocalVss(errorHandler, messageHandler)
defer localVss.DeleteSnapshots()
targetFS = localVss
}
if opts.Stdin || opts.StdinCommand {
if !gopts.JSON {
progressPrinter.V("read data from stdin")
}
filename := path.Join("/", opts.StdinFilename)
var source io.ReadCloser = os.Stdin
if opts.StdinCommand {
source, err = fs.NewCommandReader(ctx, args, globalOptions.stderr)
if err != nil {
return err
}
}
targetFS = &fs.Reader{
ModTime: timeStamp,
Name: filename,
Mode: 0644,
ReadCloser: source,
}
targets = []string{filename}
}
wg, wgCtx := errgroup.WithContext(ctx)
cancelCtx, cancel := context.WithCancel(wgCtx)
defer cancel()
if !opts.NoScan {
sc := archiver.NewScanner(targetFS)
sc.SelectByName = selectByNameFilter
sc.Select = selectFilter
sc.Error = progressPrinter.ScannerError
sc.Result = progressReporter.ReportTotal
if !gopts.JSON {
progressPrinter.V("start scan on %v", targets)
}
wg.Go(func() error { return sc.Scan(cancelCtx, targets) })
}
arch := archiver.New(repo, targetFS, archiver.Options{ReadConcurrency: opts.ReadConcurrency})
arch.SelectByName = selectByNameFilter
arch.Select = selectFilter
arch.WithAtime = opts.WithAtime
success := true
arch.Error = func(item string, err error) error {
success = false
reterr := progressReporter.Error(item, err)
// If we receive a fatal error during the execution of the snapshot,
// we abort the snapshot.
if reterr == nil && errors.IsFatal(err) {
reterr = err
}
return reterr
}
arch.CompleteItem = progressReporter.CompleteItem
arch.StartFile = progressReporter.StartFile
arch.CompleteBlob = progressReporter.CompleteBlob
if opts.IgnoreInode {
// --ignore-inode implies --ignore-ctime: on FUSE, the ctime is not
// reliable either.
arch.ChangeIgnoreFlags |= archiver.ChangeIgnoreCtime | archiver.ChangeIgnoreInode
}
if opts.IgnoreCtime {
arch.ChangeIgnoreFlags |= archiver.ChangeIgnoreCtime
}
snapshotOpts := archiver.SnapshotOptions{
Excludes: opts.Excludes,
Tags: opts.Tags.Flatten(),
Time: timeStamp,
Hostname: opts.Host,
ParentSnapshot: parentSnapshot,
ProgramVersion: "restic " + version,
}
if !gopts.JSON {
progressPrinter.V("start backup on %v", targets)
}
_, id, err := arch.Snapshot(ctx, targets, snapshotOpts)
// cleanly shutdown all running goroutines
cancel()
// let's see if one returned an error
werr := wg.Wait()
// return original error
if err != nil {
return errors.Fatalf("unable to save snapshot: %v", err)
}
// Report finished execution
progressReporter.Finish(id, opts.DryRun)
if !gopts.JSON && !opts.DryRun {
progressPrinter.P("snapshot %s saved\n", id.Str())
}
if !success {
return ErrInvalidSourceData
}
// Return error if any
return werr
}
package main
import (
"bytes"
"fmt"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"testing"
rtest "github.com/restic/restic/internal/test"
)
func TestCollectTargets(t *testing.T) {
dir := rtest.TempDir(t)
fooSpace := "foo "
barStar := "bar*" // Must sort before the others, below.
if runtime.GOOS == "windows" { // Doesn't allow "*" or trailing space.
fooSpace = "foo"
barStar = "bar"
}
var expect []string
for _, filename := range []string{
barStar, "baz", "cmdline arg", fooSpace,
"fromfile", "fromfile-raw", "fromfile-verbatim", "quux",
} {
// All mentioned files must exist for collectTargets.
f, err := os.Create(filepath.Join(dir, filename))
rtest.OK(t, err)
rtest.OK(t, f.Close())
expect = append(expect, f.Name())
}
f1, err := os.Create(filepath.Join(dir, "fromfile"))
rtest.OK(t, err)
// Empty lines should be ignored. A line starting with '#' is a comment.
fmt.Fprintf(f1, "\n%s*\n # here's a comment\n", f1.Name())
rtest.OK(t, f1.Close())
f2, err := os.Create(filepath.Join(dir, "fromfile-verbatim"))
rtest.OK(t, err)
for _, filename := range []string{fooSpace, barStar} {
// Empty lines should be ignored. CR+LF is allowed.
fmt.Fprintf(f2, "%s\r\n\n", filepath.Join(dir, filename))
}
rtest.OK(t, f2.Close())
f3, err := os.Create(filepath.Join(dir, "fromfile-raw"))
rtest.OK(t, err)
for _, filename := range []string{"baz", "quux"} {
fmt.Fprintf(f3, "%s\x00", filepath.Join(dir, filename))
}
rtest.OK(t, err)
rtest.OK(t, f3.Close())
opts := BackupOptions{
FilesFrom: []string{f1.Name()},
FilesFromVerbatim: []string{f2.Name()},
FilesFromRaw: []string{f3.Name()},
}
targets, err := collectTargets(opts, []string{filepath.Join(dir, "cmdline arg")})
rtest.OK(t, err)
sort.Strings(targets)
rtest.Equals(t, expect, targets)
}
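// An additional test sketch (not part of the original suite): filterExisting
// should drop missing items and fail once nothing is left. It assumes
// fs.Lstat reports os.ErrNotExist for missing paths, as the implementation
// expects.
func TestFilterExisting(t *testing.T) {
	dir := rtest.TempDir(t)
	existing := filepath.Join(dir, "file")
	rtest.OK(t, os.WriteFile(existing, []byte("x"), 0o600))

	result, err := filterExisting([]string{existing, filepath.Join(dir, "missing")})
	rtest.OK(t, err)
	rtest.Equals(t, []string{existing}, result)

	_, err = filterExisting([]string{filepath.Join(dir, "missing")})
	rtest.Assert(t, err != nil, "expected an error when no targets exist")
}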
func TestReadFilenamesRaw(t *testing.T) {
// These should all be returned exactly as-is.
expected := []string{
"\xef\xbb\xbf/utf-8-bom",
"/absolute",
"../.././relative",
"\t\t leading and trailing space \t\t",
"newline\nin filename",
"not UTF-8: \x80\xff/simple",
` / *[]* \ `,
}
var buf bytes.Buffer
for _, name := range expected {
buf.WriteString(name)
buf.WriteByte(0)
}
got, err := readFilenamesRaw(&buf)
rtest.OK(t, err)
rtest.Equals(t, expected, got)
// Empty input is ok.
got, err = readFilenamesRaw(strings.NewReader(""))
rtest.OK(t, err)
rtest.Equals(t, 0, len(got))
// An empty filename is an error.
_, err = readFilenamesRaw(strings.NewReader("foo\x00\x00"))
rtest.Assert(t, err != nil, "no error for zero byte")
rtest.Assert(t, strings.Contains(err.Error(), "empty filename"),
"wrong error message: %v", err.Error())
// No trailing NUL byte is an error, because it likely means we're
// reading a line-oriented text file (someone forgot -print0).
_, err = readFilenamesRaw(strings.NewReader("simple.txt"))
rtest.Assert(t, err != nil, "no error for zero byte")
rtest.Assert(t, strings.Contains(err.Error(), "zero byte"),
"wrong error message: %v", err.Error())
}
Review the shared code context and generate a suite of multiple unit tests for the functions in shared code context using the detected test framework and libraries.
Code context:
package main
import (
"context"
"math/rand"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/spf13/cobra"
"github.com/restic/restic/internal/cache"
"github.com/restic/restic/internal/checker"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui"
)
var cmdCheck = &cobra.Command{
Use: "check [flags]",
Short: "Check the repository for errors",
Long: `
The "check" command tests the repository for errors and reports any errors it
finds. It can also be used to read all data and therefore simulate a restore.
By default, the "check" command will always load all data directly from the
repository and not use a local cache.
EXIT STATUS
===========
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runCheck(cmd.Context(), checkOptions, globalOptions, args)
},
PreRunE: func(cmd *cobra.Command, args []string) error {
return checkFlags(checkOptions)
},
}
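// Example invocations (a sketch; flags as registered in init below):
//
//	restic check
//	restic check --read-data
//	restic check --read-data-subset=10%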
// CheckOptions bundles all options for the 'check' command.
type CheckOptions struct {
ReadData bool
ReadDataSubset string
CheckUnused bool
WithCache bool
}
var checkOptions CheckOptions
func init() {
cmdRoot.AddCommand(cmdCheck)
f := cmdCheck.Flags()
f.BoolVar(&checkOptions.ReadData, "read-data", false, "read all data blobs")
f.StringVar(&checkOptions.ReadDataSubset, "read-data-subset", "", "read a `subset` of data packs, specified as 'n/t' for specific part, or either 'x%' or 'x.y%' or a size in bytes with suffixes k/K, m/M, g/G, t/T for a random subset")
var ignored bool
f.BoolVar(&ignored, "check-unused", false, "find unused blobs")
err := f.MarkDeprecated("check-unused", "`--check-unused` is deprecated and will be ignored")
if err != nil {
// MarkDeprecated only returns an error when the flag is not found
panic(err)
}
f.BoolVar(&checkOptions.WithCache, "with-cache", false, "use existing cache, only read uncached data from repository")
}
func checkFlags(opts CheckOptions) error {
if opts.ReadData && opts.ReadDataSubset != "" {
return errors.Fatal("check flags --read-data and --read-data-subset cannot be used together")
}
if opts.ReadDataSubset != "" {
dataSubset, err := stringToIntSlice(opts.ReadDataSubset)
argumentError := errors.Fatal("check flag --read-data-subset has invalid value, please see documentation")
if err == nil {
if len(dataSubset) != 2 {
return argumentError
}
if dataSubset[0] == 0 || dataSubset[1] == 0 || dataSubset[0] > dataSubset[1] {
return errors.Fatal("check flag --read-data-subset=n/t values must be positive integers, and n <= t, e.g. --read-data-subset=1/2")
}
if dataSubset[1] > totalBucketsMax {
return errors.Fatalf("check flag --read-data-subset=n/t t must be at most %d", totalBucketsMax)
}
} else if strings.HasSuffix(opts.ReadDataSubset, "%") {
percentage, err := parsePercentage(opts.ReadDataSubset)
if err != nil {
return argumentError
}
if percentage <= 0.0 || percentage > 100.0 {
return errors.Fatal(
"check flag --read-data-subset=x% x must be above 0.0% and at most 100.0%")
}
} else {
fileSize, err := ui.ParseBytes(opts.ReadDataSubset)
if err != nil {
return argumentError
}
if fileSize <= 0.0 {
return errors.Fatal(
"check flag --read-data-subset=n n must be above 0")
}
}
}
return nil
}
// See doReadData in runCheck below for why this is 256.
const totalBucketsMax = 256
// stringToIntSlice converts string to []uint, using '/' as element separator
func stringToIntSlice(param string) (split []uint, err error) {
if param == "" {
return nil, nil
}
parts := strings.Split(param, "/")
result := make([]uint, len(parts))
for idx, part := range parts {
uintval, err := strconv.ParseUint(part, 10, 0)
if err != nil {
return nil, err
}
result[idx] = uint(uintval)
}
return result, nil
}
// parsePercentage parses a percentage string of the form "X%" where X is a float constant,
// and returns the value of that constant. It does not check the range of the value.
func parsePercentage(s string) (float64, error) {
if !strings.HasSuffix(s, "%") {
return 0, errors.Errorf(`parsePercentage: %q does not end in "%%"`, s)
}
s = s[:len(s)-1]
p, err := strconv.ParseFloat(s, 64)
if err != nil {
return 0, errors.Errorf("parsePercentage: %v", err)
}
return p, nil
}
// prepareCheckCache configures a special cache directory for check.
//
// - if --with-cache is specified, the default cache is used
// - if the user explicitly requested --no-cache, we don't use any cache
// - if the user provides --cache-dir, we use a cache in a temporary sub-directory of the specified directory and the sub-directory is deleted after the check
// - by default, we use a cache in a temporary directory that is deleted after the check
func prepareCheckCache(opts CheckOptions, gopts *GlobalOptions) (cleanup func()) {
cleanup = func() {}
if opts.WithCache {
// use the default cache, no setup needed
return cleanup
}
if gopts.NoCache {
// don't use any cache, no setup needed
return cleanup
}
cachedir := gopts.CacheDir
if cachedir == "" {
cachedir = cache.EnvDir()
}
// use a cache in a temporary directory
tempdir, err := os.MkdirTemp(cachedir, "restic-check-cache-")
if err != nil {
// if an error occurs, don't use any cache
Warnf("unable to create temporary directory for cache during check, disabling cache: %v\n", err)
gopts.NoCache = true
return cleanup
}
gopts.CacheDir = tempdir
Verbosef("using temporary cache in %v\n", tempdir)
cleanup = func() {
err := fs.RemoveAll(tempdir)
if err != nil {
Warnf("error removing temporary cache directory: %v\n", err)
}
}
return cleanup
}
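// Usage sketch: callers pair the returned cleanup function with a cleanup
// handler, exactly as runCheck does below:
//
//	cleanup := prepareCheckCache(opts, &gopts)
//	AddCleanupHandler(func(code int) (int, error) {
//		cleanup()
//		return code, nil
//	})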
func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args []string) error {
if len(args) != 0 {
return errors.Fatal("the check command expects no arguments, only options - please see `restic help check` for usage and flags")
}
cleanup := prepareCheckCache(opts, &gopts)
AddCleanupHandler(func(code int) (int, error) {
cleanup()
return code, nil
})
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
var lock *restic.Lock
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
chkr := checker.New(repo, opts.CheckUnused)
err = chkr.LoadSnapshots(ctx)
if err != nil {
return err
}
Verbosef("load indexes\n")
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
hints, errs := chkr.LoadIndex(ctx, bar)
errorsFound := false
suggestIndexRebuild := false
mixedFound := false
for _, hint := range hints {
switch hint.(type) {
case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat:
Printf("%v\n", hint)
suggestIndexRebuild = true
case *checker.ErrMixedPack:
Printf("%v\n", hint)
mixedFound = true
default:
Warnf("error: %v\n", hint)
errorsFound = true
}
}
if suggestIndexRebuild {
Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n")
}
if mixedFound {
Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n")
}
if len(errs) > 0 {
for _, err := range errs {
Warnf("error: %v\n", err)
}
return errors.Fatal("LoadIndex returned errors")
}
orphanedPacks := 0
errChan := make(chan error)
Verbosef("check all packs\n")
go chkr.Packs(ctx, errChan)
for err := range errChan {
if checker.IsOrphanedPack(err) {
orphanedPacks++
Verbosef("%v\n", err)
} else if err == checker.ErrLegacyLayout {
Verbosef("repository still uses the S3 legacy layout\nPlease run `restic migrate s3legacy` to correct this.\n")
} else {
errorsFound = true
Warnf("%v\n", err)
}
}
if orphanedPacks > 0 {
Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks)
}
Verbosef("check snapshots, trees and blobs\n")
errChan = make(chan error)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
bar := newProgressMax(!gopts.Quiet, 0, "snapshots")
defer bar.Done()
chkr.Structure(ctx, bar, errChan)
}()
for err := range errChan {
errorsFound = true
if e, ok := err.(*checker.TreeError); ok {
var clean string
if stdoutCanUpdateStatus() {
clean = clearLine(0)
}
Warnf(clean+"error for tree %v:\n", e.ID.Str())
for _, treeErr := range e.Errors {
Warnf(" %v\n", treeErr)
}
} else {
Warnf("error: %v\n", err)
}
}
// Wait for the progress bar to be complete before printing more below.
// Must happen after `errChan` is read from in the above loop to avoid
// deadlocking in the case of errors.
wg.Wait()
if opts.CheckUnused {
for _, id := range chkr.UnusedBlobs(ctx) {
Verbosef("unused blob %v\n", id)
errorsFound = true
}
}
doReadData := func(packs map[restic.ID]int64) {
packCount := uint64(len(packs))
p := newProgressMax(!gopts.Quiet, packCount, "packs")
errChan := make(chan error)
go chkr.ReadPacks(ctx, packs, p, errChan)
var salvagePacks restic.IDs
for err := range errChan {
errorsFound = true
Warnf("%v\n", err)
if err, ok := err.(*checker.ErrPackData); ok {
if strings.Contains(err.Error(), "wrong data returned, hash is") {
salvagePacks = append(salvagePacks, err.PackID)
}
}
}
p.Done()
if len(salvagePacks) > 0 {
Warnf("\nThe repository contains pack files with damaged blobs. These blobs must be removed to repair the repository. This can be done using the following commands:\n\n")
var strIds []string
for _, id := range salvagePacks {
strIds = append(strIds, id.String())
}
Warnf("RESTIC_FEATURES=repair-packs-v1 restic repair packs %v\nrestic repair snapshots --forget\n\n", strings.Join(strIds, " "))
Warnf("Corrupted blobs are either caused by hardware problems or bugs in restic. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting!\n")
}
}
switch {
case opts.ReadData:
Verbosef("read all data\n")
doReadData(selectPacksByBucket(chkr.GetPacks(), 1, 1))
case opts.ReadDataSubset != "":
var packs map[restic.ID]int64
dataSubset, err := stringToIntSlice(opts.ReadDataSubset)
if err == nil {
bucket := dataSubset[0]
totalBuckets := dataSubset[1]
packs = selectPacksByBucket(chkr.GetPacks(), bucket, totalBuckets)
packCount := uint64(len(packs))
Verbosef("read group #%d of %d data packs (out of total %d packs in %d groups)\n", bucket, packCount, chkr.CountPacks(), totalBuckets)
} else if strings.HasSuffix(opts.ReadDataSubset, "%") {
percentage, err := parsePercentage(opts.ReadDataSubset)
if err == nil {
packs = selectRandomPacksByPercentage(chkr.GetPacks(), percentage)
Verbosef("read %.1f%% of data packs\n", percentage)
}
} else {
repoSize := int64(0)
allPacks := chkr.GetPacks()
for _, size := range allPacks {
repoSize += size
}
if repoSize == 0 {
return errors.Fatal("Cannot read from a repository having size 0")
}
subsetSize, _ := ui.ParseBytes(opts.ReadDataSubset)
if subsetSize > repoSize {
subsetSize = repoSize
}
packs = selectRandomPacksByFileSize(chkr.GetPacks(), subsetSize, repoSize)
Verbosef("read %d bytes of data packs\n", subsetSize)
}
if packs == nil {
return errors.Fatal("internal error: failed to select packs to check")
}
doReadData(packs)
}
if errorsFound {
return errors.Fatal("repository contains errors")
}
Verbosef("no errors were found\n")
return nil
}
// selectPacksByBucket selects subsets of packs by ranges of buckets.
func selectPacksByBucket(allPacks map[restic.ID]int64, bucket, totalBuckets uint) map[restic.ID]int64 {
packs := make(map[restic.ID]int64)
for pack, size := range allPacks {
// If we ever check more than the first byte
// of pack, update totalBucketsMax.
if (uint(pack[0]) % totalBuckets) == (bucket - 1) {
packs[pack] = size
}
}
return packs
}
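// Worked example: with totalBuckets=4, a pack whose ID starts with byte 0x05
// satisfies 5 % 4 == 1 == bucket-1, so it is returned only for bucket 2,
// i.e. selectPacksByBucket(packs, 2, 4).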
// selectRandomPacksByPercentage selects the given percentage of packs which are randomly chosen.
func selectRandomPacksByPercentage(allPacks map[restic.ID]int64, percentage float64) map[restic.ID]int64 {
packCount := len(allPacks)
packsToCheck := int(float64(packCount) * (percentage / 100.0))
if packCount > 0 && packsToCheck < 1 {
packsToCheck = 1
}
timeNs := time.Now().UnixNano()
r := rand.New(rand.NewSource(timeNs))
idx := r.Perm(packCount)
var keys []restic.ID
for k := range allPacks {
keys = append(keys, k)
}
packs := make(map[restic.ID]int64)
for i := 0; i < packsToCheck; i++ {
id := keys[idx[i]]
packs[id] = allPacks[id]
}
return packs
}
func selectRandomPacksByFileSize(allPacks map[restic.ID]int64, subsetSize int64, repoSize int64) map[restic.ID]int64 {
subsetPercentage := (float64(subsetSize) / float64(repoSize)) * 100.0
packs := selectRandomPacksByPercentage(allPacks, subsetPercentage)
return packs
}
package main
import (
"math"
"reflect"
"testing"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
func TestParsePercentage(t *testing.T) {
testCases := []struct {
input string
output float64
expectError bool
}{
{"0%", 0.0, false},
{"1%", 1.0, false},
{"100%", 100.0, false},
{"123%", 123.0, false},
{"123.456%", 123.456, false},
{"0.742%", 0.742, false},
{"-100%", -100.0, false},
{" 1%", 0.0, true},
{"1 %", 0.0, true},
{"1% ", 0.0, true},
}
for _, testCase := range testCases {
output, err := parsePercentage(testCase.input)
if testCase.expectError {
rtest.Assert(t, err != nil, "Expected error for case %s", testCase.input)
rtest.Assert(t, output == 0.0, "Expected output to be 0.0, got %v", output)
} else {
rtest.Assert(t, err == nil, "Expected no error for case %s", testCase.input)
rtest.Assert(t, math.Abs(testCase.output-output) < 0.00001, "Expected %f, got %f",
testCase.output, output)
}
}
}
func TestStringToIntSlice(t *testing.T) {
testCases := []struct {
input string
output []uint
expectError bool
}{
{"3/5", []uint{3, 5}, false},
{"1/100", []uint{1, 100}, false},
{"abc", nil, true},
{"1/a", nil, true},
{"/", nil, true},
}
for _, testCase := range testCases {
output, err := stringToIntSlice(testCase.input)
if testCase.expectError {
rtest.Assert(t, err != nil, "Expected error for case %s", testCase.input)
rtest.Assert(t, output == nil, "Expected output to be nil, got %v", output)
} else {
rtest.Assert(t, err == nil, "Expected no error for case %s", testCase.input)
rtest.Assert(t, len(output) == 2, "Invalid output length for case %s", testCase.input)
rtest.Assert(t, reflect.DeepEqual(output, testCase.output), "Expected %v, got %v",
testCase.output, output)
}
}
}
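// An additional test sketch (not part of the original suite): checkFlags
// should reject contradictory or malformed --read-data options and accept
// the documented forms.
func TestCheckFlags(t *testing.T) {
	rtest.OK(t, checkFlags(CheckOptions{}))
	rtest.OK(t, checkFlags(CheckOptions{ReadData: true}))
	rtest.Assert(t, checkFlags(CheckOptions{ReadData: true, ReadDataSubset: "1/2"}) != nil,
		"expected --read-data and --read-data-subset to conflict")
	rtest.OK(t, checkFlags(CheckOptions{ReadDataSubset: "1/2"}))
	rtest.Assert(t, checkFlags(CheckOptions{ReadDataSubset: "3/2"}) != nil,
		"expected n <= t to be enforced")
	rtest.Assert(t, checkFlags(CheckOptions{ReadDataSubset: "0%"}) != nil,
		"expected percentage above 0 to be required")
	rtest.OK(t, checkFlags(CheckOptions{ReadDataSubset: "10%"}))
	rtest.OK(t, checkFlags(CheckOptions{ReadDataSubset: "1G"}))
}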
func TestSelectPacksByBucket(t *testing.T) {
var testPacks = make(map[restic.ID]int64)
for i := 1; i <= 10; i++ {
id := restic.NewRandomID()
// ensure relevant part of generated id is reproducible
id[0] = byte(i)
testPacks[id] = 0
}
selectedPacks := selectPacksByBucket(testPacks, 0, 10)
rtest.Assert(t, len(selectedPacks) == 0, "Expected 0 selected packs")
for i := uint(1); i <= 5; i++ {
selectedPacks = selectPacksByBucket(testPacks, i, 5)
rtest.Assert(t, len(selectedPacks) == 2, "Expected 2 selected packs")
}
selectedPacks = selectPacksByBucket(testPacks, 1, 1)
rtest.Assert(t, len(selectedPacks) == 10, "Expected 10 selected packs")
for testPack := range testPacks {
_, ok := selectedPacks[testPack]
rtest.Assert(t, ok, "Expected input and output to be equal")
}
}
func TestSelectRandomPacksByPercentage(t *testing.T) {
var testPacks = make(map[restic.ID]int64)
for i := 1; i <= 10; i++ {
testPacks[restic.NewRandomID()] = 0
}
selectedPacks := selectRandomPacksByPercentage(testPacks, 0.0)
rtest.Assert(t, len(selectedPacks) == 1, "Expected 1 selected pack")
selectedPacks = selectRandomPacksByPercentage(testPacks, 10.0)
rtest.Assert(t, len(selectedPacks) == 1, "Expected 1 selected pack")
for pack := range selectedPacks {
_, ok := testPacks[pack]
rtest.Assert(t, ok, "Unexpected selection")
}
selectedPacks = selectRandomPacksByPercentage(testPacks, 50.0)
rtest.Assert(t, len(selectedPacks) == 5, "Expected 5 selected packs")
for pack := range selectedPacks {
_, ok := testPacks[pack]
rtest.Assert(t, ok, "Unexpected item in selection")
}
selectedPacks = selectRandomPacksByPercentage(testPacks, 100.0)
rtest.Assert(t, len(selectedPacks) == 10, "Expected 10 selected packs")
for testPack := range testPacks {
_, ok := selectedPacks[testPack]
rtest.Assert(t, ok, "Expected input and output to be equal")
}
}
func TestSelectNoRandomPacksByPercentage(t *testing.T) {
// verify that a repository without pack files works
var testPacks = make(map[restic.ID]int64)
selectedPacks := selectRandomPacksByPercentage(testPacks, 10.0)
rtest.Assert(t, len(selectedPacks) == 0, "Expected 0 selected packs")
}
func TestSelectRandomPacksByFileSize(t *testing.T) {
var testPacks = make(map[restic.ID]int64)
for i := 1; i <= 10; i++ {
id := restic.NewRandomID()
// ensure unique ids
id[0] = byte(i)
testPacks[id] = 0
}
selectedPacks := selectRandomPacksByFileSize(testPacks, 10, 500)
rtest.Assert(t, len(selectedPacks) == 1, "Expected 1 selected pack")
selectedPacks = selectRandomPacksByFileSize(testPacks, 10240, 51200)
rtest.Assert(t, len(selectedPacks) == 2, "Expected 2 selected packs")
for pack := range selectedPacks {
_, ok := testPacks[pack]
rtest.Assert(t, ok, "Unexpected selection")
}
selectedPacks = selectRandomPacksByFileSize(testPacks, 500, 500)
rtest.Assert(t, len(selectedPacks) == 10, "Expected 10 selected packs")
for pack := range selectedPacks {
_, ok := testPacks[pack]
rtest.Assert(t, ok, "Unexpected item in selection")
}
}
func TestSelectNoRandomPacksByFileSize(t *testing.T) {
// verify that a repository without pack files works
var testPacks = make(map[restic.ID]int64)
selectedPacks := selectRandomPacksByFileSize(testPacks, 10, 500)
rtest.Assert(t, len(selectedPacks) == 0, "Expected 0 selected packs")
}
Review the shared code context and generate a suite of multiple unit tests for the functions in shared code context using the detected test framework and libraries.
Code context:
package main
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"strings"
"github.com/restic/chunker"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/restorer"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/table"
"github.com/restic/restic/internal/walker"
"github.com/minio/sha256-simd"
"github.com/spf13/cobra"
)
var cmdStats = &cobra.Command{
Use: "stats [flags] [snapshot ID] [...]",
Short: "Scan the repository and show basic statistics",
Long: `
The "stats" command walks one or multiple snapshots in a repository
and accumulates statistics about the data stored therein. It reports
on the number of unique files and their sizes, according to one of
the counting modes as given by the --mode flag.
It operates on all snapshots matching the selection criteria or all
snapshots if nothing is specified. The special snapshot ID "latest"
is also supported. Some modes make more sense over
just a single snapshot, while others are useful across all snapshots,
depending on what you are trying to calculate.
The modes are:
* restore-size: (default) Counts the size of the restored files.
* files-by-contents: Counts total size of files, where a file is
considered unique if it has unique contents.
* raw-data: Counts the size of blobs in the repository, regardless of
how many files reference them.
* blobs-per-file: A combination of files-by-contents and raw-data.
Refer to the online manual for more details about each mode.
EXIT STATUS
===========
Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runStats(cmd.Context(), statsOptions, globalOptions, args)
},
}
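// Example invocations (a sketch; the --mode flag is registered in init below,
// and "0ab1cd23" is a placeholder snapshot ID):
//
//	restic stats                          // restore-size across all snapshots
//	restic stats --mode raw-data latest
//	restic stats --mode blobs-per-file 0ab1cd23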
// StatsOptions collects all options for the stats command.
type StatsOptions struct {
// the mode of counting to perform (see consts for available modes)
countMode string
restic.SnapshotFilter
}
var statsOptions StatsOptions
func init() {
cmdRoot.AddCommand(cmdStats)
f := cmdStats.Flags()
f.StringVar(&statsOptions.countMode, "mode", countModeRestoreSize, "counting mode: restore-size (default), files-by-contents, blobs-per-file or raw-data")
initMultiSnapshotFilter(f, &statsOptions.SnapshotFilter, true)
}
func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args []string) error {
err := verifyStatsInput(opts)
if err != nil {
return err
}
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return err
}
if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {
return err
}
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
if err = repo.LoadIndex(ctx, bar); err != nil {
return err
}
if opts.countMode == countModeDebug {
return statsDebug(ctx, repo)
}
if !gopts.JSON {
Printf("scanning...\n")
}
// create a container for the stats (and other needed state)
stats := &statsContainer{
uniqueFiles: make(map[fileID]struct{}),
fileBlobs: make(map[string]restic.IDSet),
blobs: restic.NewBlobSet(),
SnapshotsCount: 0,
}
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, args) {
err = statsWalkSnapshot(ctx, sn, repo, opts, stats)
if err != nil {
return fmt.Errorf("error walking snapshot: %v", err)
}
}
if err != nil {
return err
}
if opts.countMode == countModeRawData {
// the blob handles have been collected, but not yet counted
for blobHandle := range stats.blobs {
pbs := repo.Index().Lookup(blobHandle)
if len(pbs) == 0 {
return fmt.Errorf("blob %v not found", blobHandle)
}
stats.TotalSize += uint64(pbs[0].Length)
if repo.Config().Version >= 2 {
stats.TotalUncompressedSize += uint64(crypto.CiphertextLength(int(pbs[0].DataLength())))
if pbs[0].IsCompressed() {
stats.TotalCompressedBlobsSize += uint64(pbs[0].Length)
stats.TotalCompressedBlobsUncompressedSize += uint64(crypto.CiphertextLength(int(pbs[0].DataLength())))
}
}
stats.TotalBlobCount++
}
if stats.TotalCompressedBlobsSize > 0 {
stats.CompressionRatio = float64(stats.TotalCompressedBlobsUncompressedSize) / float64(stats.TotalCompressedBlobsSize)
}
if stats.TotalUncompressedSize > 0 {
stats.CompressionProgress = float64(stats.TotalCompressedBlobsUncompressedSize) / float64(stats.TotalUncompressedSize) * 100
stats.CompressionSpaceSaving = (1 - float64(stats.TotalSize)/float64(stats.TotalUncompressedSize)) * 100
}
}
if gopts.JSON {
err = json.NewEncoder(globalOptions.stdout).Encode(stats)
if err != nil {
return fmt.Errorf("encoding output: %v", err)
}
return nil
}
Printf("Stats in %s mode:\n", opts.countMode)
Printf(" Snapshots processed: %d\n", stats.SnapshotsCount)
if stats.TotalBlobCount > 0 {
Printf(" Total Blob Count: %d\n", stats.TotalBlobCount)
}
if stats.TotalFileCount > 0 {
Printf(" Total File Count: %d\n", stats.TotalFileCount)
}
if stats.TotalUncompressedSize > 0 {
Printf(" Total Uncompressed Size: %-5s\n", ui.FormatBytes(stats.TotalUncompressedSize))
}
Printf(" Total Size: %-5s\n", ui.FormatBytes(stats.TotalSize))
if stats.CompressionProgress > 0 {
Printf(" Compression Progress: %.2f%%\n", stats.CompressionProgress)
}
if stats.CompressionRatio > 0 {
Printf(" Compression Ratio: %.2fx\n", stats.CompressionRatio)
}
if stats.CompressionSpaceSaving > 0 {
Printf("Compression Space Saving: %.2f%%\n", stats.CompressionSpaceSaving)
}
return nil
}
func statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, opts StatsOptions, stats *statsContainer) error {
if snapshot.Tree == nil {
return fmt.Errorf("snapshot %s has nil tree", snapshot.ID().Str())
}
stats.SnapshotsCount++
if opts.countMode == countModeRawData {
// count just the sizes of unique blobs; we don't need to walk the tree
// ourselves in this case, since a nifty function does it for us
return restic.FindUsedBlobs(ctx, repo, restic.IDs{*snapshot.Tree}, stats.blobs, nil)
}
hardLinkIndex := restorer.NewHardlinkIndex[struct{}]()
err := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, opts, stats, hardLinkIndex))
if err != nil {
return fmt.Errorf("walking tree %s: %v", *snapshot.Tree, err)
}
return nil
}
func statsWalkTree(repo restic.Repository, opts StatsOptions, stats *statsContainer, hardLinkIndex *restorer.HardlinkIndex[struct{}]) walker.WalkFunc {
return func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {
if nodeErr != nil {
return true, nodeErr
}
if node == nil {
return true, nil
}
if opts.countMode == countModeUniqueFilesByContents || opts.countMode == countModeBlobsPerFile {
// only count this file if we haven't visited it before
fid := makeFileIDByContents(node)
if _, ok := stats.uniqueFiles[fid]; !ok {
// mark the file as visited
stats.uniqueFiles[fid] = struct{}{}
if opts.countMode == countModeUniqueFilesByContents {
// simply count the size of each unique file (unique by contents only)
stats.TotalSize += node.Size
stats.TotalFileCount++
}
if opts.countMode == countModeBlobsPerFile {
// count the size of each unique blob reference, which is
// by unique file (unique by contents and file path)
for _, blobID := range node.Content {
// ensure we have this file (by path) in our map; in this
// mode, a file is unique by both contents and path
nodePath := filepath.Join(npath, node.Name)
if _, ok := stats.fileBlobs[nodePath]; !ok {
stats.fileBlobs[nodePath] = restic.NewIDSet()
stats.TotalFileCount++
}
if _, ok := stats.fileBlobs[nodePath][blobID]; !ok {
// is always a data blob since we're accessing it via a file's Content array
blobSize, found := repo.LookupBlobSize(blobID, restic.DataBlob)
if !found {
return true, fmt.Errorf("blob %s not found for tree %s", blobID, parentTreeID)
}
// count the blob's size, then add this blob by this
// file (path) so we don't double-count it
stats.TotalSize += uint64(blobSize)
stats.fileBlobs[nodePath].Insert(blobID)
// this mode also counts total unique blob _references_ per file
stats.TotalBlobCount++
}
}
}
}
}
if opts.countMode == countModeRestoreSize {
// as this is a file in the snapshot, we can simply count its
// size without worrying about uniqueness, since duplicate files
// will still be restored
stats.TotalFileCount++
// if inodes are present, only count each inode once
// (hard links do not increase restore size)
if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 {
hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{})
stats.TotalSize += node.Size
}
return false, nil
}
return true, nil
}
}
// makeFileIDByContents returns a hash of the blob IDs of the
// node's Content in sequence.
func makeFileIDByContents(node *restic.Node) fileID {
var bb []byte
for _, c := range node.Content {
bb = append(bb, []byte(c[:])...)
}
return sha256.Sum256(bb)
}
func verifyStatsInput(opts StatsOptions) error {
// require a recognized counting mode
switch opts.countMode {
case countModeRestoreSize:
case countModeUniqueFilesByContents:
case countModeBlobsPerFile:
case countModeRawData:
case countModeDebug:
default:
return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", opts.countMode)
}
return nil
}
// statsContainer holds information during a walk of a repository
// to collect information about it, as well as state needed
// for a successful and efficient walk.
type statsContainer struct {
TotalSize uint64 `json:"total_size"`
TotalUncompressedSize uint64 `json:"total_uncompressed_size,omitempty"`
TotalCompressedBlobsSize uint64 `json:"-"`
TotalCompressedBlobsUncompressedSize uint64 `json:"-"`
CompressionRatio float64 `json:"compression_ratio,omitempty"`
CompressionProgress float64 `json:"compression_progress,omitempty"`
CompressionSpaceSaving float64 `json:"compression_space_saving,omitempty"`
TotalFileCount uint64 `json:"total_file_count,omitempty"`
TotalBlobCount uint64 `json:"total_blob_count,omitempty"`
// holds count of all considered snapshots
SnapshotsCount int `json:"snapshots_count"`
// uniqueFiles marks visited files according to their
// contents (hashed sequence of content blob IDs)
uniqueFiles map[fileID]struct{}
// fileBlobs maps a file name (path) to the set of
// blobs that have been seen as a part of the file
fileBlobs map[string]restic.IDSet
// blobs is used to count individual unique blobs,
// independent of references to files
blobs restic.BlobSet
}
// fileID is a 256-bit hash that distinguishes unique files.
type fileID [32]byte
const (
countModeRestoreSize = "restore-size"
countModeUniqueFilesByContents = "files-by-contents"
countModeBlobsPerFile = "blobs-per-file"
countModeRawData = "raw-data"
countModeDebug = "debug"
)
func statsDebug(ctx context.Context, repo restic.Repository) error {
Warnf("Collecting size statistics\n\n")
for _, t := range []restic.FileType{restic.KeyFile, restic.LockFile, restic.IndexFile, restic.PackFile} {
hist, err := statsDebugFileType(ctx, repo, t)
if err != nil {
return err
}
Warnf("File Type: %v\n%v\n", t, hist)
}
hist := statsDebugBlobs(ctx, repo)
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
Warnf("Blob Type: %v\n%v\n\n", t, hist[t])
}
return nil
}
func statsDebugFileType(ctx context.Context, repo restic.Repository, tpe restic.FileType) (*sizeHistogram, error) {
hist := newSizeHistogram(2 * repository.MaxPackSize)
err := repo.List(ctx, tpe, func(id restic.ID, size int64) error {
hist.Add(uint64(size))
return nil
})
return hist, err
}
func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram {
var hist [restic.NumBlobTypes]*sizeHistogram
for i := 0; i < len(hist); i++ {
hist[i] = newSizeHistogram(2 * chunker.MaxSize)
}
repo.Index().Each(ctx, func(pb restic.PackedBlob) {
hist[pb.Type].Add(uint64(pb.Length))
})
return hist
}
type sizeClass struct {
lower, upper uint64
count int64
}
type sizeHistogram struct {
count int64
totalSize uint64
buckets []sizeClass
oversized []uint64
}
func newSizeHistogram(sizeLimit uint64) *sizeHistogram {
h := &sizeHistogram{}
h.buckets = append(h.buckets, sizeClass{0, 0, 0})
lowerBound := uint64(1)
growthFactor := uint64(10)
for lowerBound < sizeLimit {
upperBound := lowerBound*growthFactor - 1
if upperBound > sizeLimit {
upperBound = sizeLimit
}
h.buckets = append(h.buckets, sizeClass{lowerBound, upperBound, 0})
lowerBound *= growthFactor
}
return h
}
func (s *sizeHistogram) Add(size uint64) {
s.count++
s.totalSize += size
for i, bucket := range s.buckets {
if size >= bucket.lower && size <= bucket.upper {
s.buckets[i].count++
return
}
}
s.oversized = append(s.oversized, size)
}
func (s sizeHistogram) String() string {
var out strings.Builder
out.WriteString(fmt.Sprintf("Count: %d\n", s.count))
out.WriteString(fmt.Sprintf("Total Size: %s\n", ui.FormatBytes(s.totalSize)))
t := table.New()
t.AddColumn("Size", "{{.SizeRange}}")
t.AddColumn("Count", "{{.Count}}")
type line struct {
SizeRange string
Count int64
}
// only print up to the highest used bucket size
lastFilledIdx := 0
for i := 0; i < len(s.buckets); i++ {
if s.buckets[i].count != 0 {
lastFilledIdx = i
}
}
var lines []line
hasStarted := false
for i, b := range s.buckets {
if i > lastFilledIdx {
break
}
if b.count > 0 {
hasStarted = true
}
if hasStarted {
lines = append(lines, line{
SizeRange: fmt.Sprintf("%d - %d Byte", b.lower, b.upper),
Count: b.count,
})
}
}
longestRange := 0
for _, l := range lines {
if longestRange < len(l.SizeRange) {
longestRange = len(l.SizeRange)
}
}
for i := range lines {
lines[i].SizeRange = strings.Repeat(" ", longestRange-len(lines[i].SizeRange)) + lines[i].SizeRange
t.AddRow(lines[i])
}
_ = t.Write(&out)
if len(s.oversized) > 0 {
out.WriteString(fmt.Sprintf("Oversized: %v\n", s.oversized))
}
return out.String()
}
package main
import (
"testing"
rtest "github.com/restic/restic/internal/test"
)
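// An additional test sketch (not part of the original suite): verifyStatsInput
// accepts exactly the documented counting modes and rejects anything else.
func TestVerifyStatsInput(t *testing.T) {
	for _, mode := range []string{
		countModeRestoreSize, countModeUniqueFilesByContents,
		countModeBlobsPerFile, countModeRawData, countModeDebug,
	} {
		rtest.OK(t, verifyStatsInput(StatsOptions{countMode: mode}))
	}
	rtest.Assert(t, verifyStatsInput(StatsOptions{countMode: "bogus"}) != nil,
		"expected unknown counting mode to be rejected")
}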
func TestSizeHistogramNew(t *testing.T) {
h := newSizeHistogram(42)
exp := &sizeHistogram{
count: 0,
totalSize: 0,
buckets: []sizeClass{
{0, 0, 0},
{1, 9, 0},
{10, 42, 0},
},
}
rtest.Equals(t, exp, h)
}
func TestSizeHistogramAdd(t *testing.T) {
h := newSizeHistogram(42)
for i := uint64(0); i < 45; i++ {
h.Add(i)
}
exp := &sizeHistogram{
count: 45,
totalSize: 990,
buckets: []sizeClass{
{0, 0, 1},
{1, 9, 9},
{10, 42, 33},
},
oversized: []uint64{43, 44},
}
rtest.Equals(t, exp, h)
}
func TestSizeHistogramString(t *testing.T) {
t.Run("overflow", func(t *testing.T) {
h := newSizeHistogram(42)
h.Add(8)
h.Add(50)
rtest.Equals(t, "Count: 2\nTotal Size: 58 B\nSize Count\n-----------------\n1 - 9 Byte 1\n-----------------\nOversized: [50]\n", h.String())
})
t.Run("withZero", func(t *testing.T) {
h := newSizeHistogram(42)
h.Add(0)
h.Add(1)
h.Add(10)
rtest.Equals(t, "Count: 3\nTotal Size: 11 B\nSize Count\n-------------------\n 0 - 0 Byte 1\n 1 - 9 Byte 1\n10 - 42 Byte 1\n-------------------\n", h.String())
})
}
| 215,470 |