diff --git a/duplicacy/duplicacy_main.go b/duplicacy/duplicacy_main.go index 6934bbc0..ce54fa95 100644 --- a/duplicacy/duplicacy_main.go +++ b/duplicacy/duplicacy_main.go @@ -22,9 +22,7 @@ import ( "github.com/gilbertchen/cli" - "io/ioutil" - - "github.com/gilbertchen/duplicacy/src" + duplicacy "github.com/gilbertchen/duplicacy/src" ) const ( @@ -316,7 +314,7 @@ func configRepository(context *cli.Context, init bool) { // write real path into .duplicacy file inside repository duplicacyFileName := path.Join(repository, duplicacy.DUPLICACY_FILE) d1 := []byte(preferencePath) - err = ioutil.WriteFile(duplicacyFileName, d1, 0644) + err = os.WriteFile(duplicacyFileName, d1, 0644) if err != nil { duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to write %s file inside repository %v", duplicacyFileName, err) return @@ -705,7 +703,7 @@ func changePassword(context *cli.Context) { } configPath := path.Join(duplicacy.GetDuplicacyPreferencePath(), "config") - err = ioutil.WriteFile(configPath, description, 0600) + err = os.WriteFile(configPath, description, 0600) if err != nil { duplicacy.LOG_ERROR("CONFIG_SAVE", "Failed to save the old config to %s: %v", configPath, err) return @@ -1043,7 +1041,6 @@ func printFile(context *cli.Context) { snapshotID = context.String("id") } - backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false) duplicacy.SavePassword(*preference, "password", password) @@ -1290,7 +1287,7 @@ func copySnapshots(context *cli.Context) { destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate")) destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository, - destinationPassword, "", "", false) + destinationPassword, "", "", false) duplicacy.SavePassword(*destination, "password", destinationPassword) destinationManager.SetupSnapshotCache(destination.Name) @@ -1415,7 +1412,7 @@ func benchmark(context *cli.Context) { if storage == nil { return } - duplicacy.Benchmark(repository, storage, int64(fileSize) * 1024 * 1024, chunkSize * 1024 * 1024, chunkCount, uploadThreads, downloadThreads) + duplicacy.Benchmark(repository, storage, int64(fileSize)*1024*1024, chunkSize*1024*1024, chunkCount, uploadThreads, downloadThreads) } func main() { @@ -1454,8 +1451,8 @@ func main() { Argument: "", }, cli.BoolFlag{ - Name: "zstd", - Usage: "short for -zstd default", + Name: "zstd", + Usage: "short for -zstd default", }, cli.IntFlag{ Name: "iterations", @@ -1530,8 +1527,8 @@ func main() { Argument: "", }, cli.BoolFlag{ - Name: "zstd", - Usage: "short for -zstd default", + Name: "zstd", + Usage: "short for -zstd default", }, cli.BoolFlag{ Name: "vss", @@ -1564,7 +1561,6 @@ func main() { Usage: "the maximum number of entries kept in memory (defaults to 1M)", Argument: "", }, - }, Usage: "Save a snapshot of the repository to the storage", ArgsUsage: " ", @@ -1624,7 +1620,7 @@ func main() { cli.BoolFlag{ Name: "persist", Usage: "continue processing despite chunk errors or existing files (without -overwrite), reporting any affected files", - }, + }, cli.StringFlag{ Name: "key-passphrase", Usage: "the passphrase to decrypt the RSA private key", @@ -1982,8 +1978,8 @@ func main() { Argument: "", }, cli.BoolFlag{ - Name: "zstd", - Usage: "short for -zstd default", + Name: "zstd", + Usage: "short for -zstd default", }, cli.IntFlag{ Name: "iterations", @@ -2248,8 +2244,8 @@ func main() { Usage: "add a comment to identify the process", }, cli.StringSliceFlag{ - Name: "suppress, s", - Usage: "suppress logs 
with the specified id", + Name: "suppress, s", + Usage: "suppress logs with the specified id", Argument: "", }, cli.BoolFlag{ diff --git a/src/duplicacy_acdclient.go b/src/duplicacy_acdclient.go index 95901f80..f69a3c28 100644 --- a/src/duplicacy_acdclient.go +++ b/src/duplicacy_acdclient.go @@ -9,10 +9,10 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" "mime/multipart" "net/http" + "os" "sync" "time" @@ -45,7 +45,7 @@ type ACDClient struct { func NewACDClient(tokenFile string) (*ACDClient, error) { - description, err := ioutil.ReadFile(tokenFile) + description, err := os.ReadFile(tokenFile) if err != nil { return nil, err } @@ -208,7 +208,7 @@ func (client *ACDClient) RefreshToken() (err error) { return err } - err = ioutil.WriteFile(client.TokenFile, description, 0644) + err = os.WriteFile(client.TokenFile, description, 0644) if err != nil { return err } diff --git a/src/duplicacy_b2client.go b/src/duplicacy_b2client.go index 0182cebd..5744f1fd 100644 --- a/src/duplicacy_b2client.go +++ b/src/duplicacy_b2client.go @@ -5,22 +5,21 @@ package duplicacy import ( - "io" - "os" - "fmt" "bytes" - "time" - "sync" - "strconv" - "strings" - "net/url" - "net/http" - "math/rand" - "io/ioutil" "crypto/sha1" + "encoding/base64" "encoding/hex" "encoding/json" - "encoding/base64" + "fmt" + "io" + "math/rand" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync" + "time" ) type B2Error struct { @@ -41,14 +40,14 @@ type B2UploadArgument struct { var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_account" type B2Client struct { - HTTPClient *http.Client + HTTPClient *http.Client - AccountID string - ApplicationKeyID string - ApplicationKey string - BucketName string - BucketID string - StorageDir string + AccountID string + ApplicationKeyID string + ApplicationKey string + BucketName string + BucketID string + StorageDir string Lock sync.Mutex AuthorizationToken string @@ -56,12 +55,12 @@ type B2Client struct { DownloadURL string IsAuthorized bool - UploadURLs []string - UploadTokens []string + UploadURLs []string + UploadTokens []string - Threads int - MaximumRetries int - TestMode bool + Threads int + MaximumRetries int + TestMode bool LastAuthorizationTime int64 } @@ -81,7 +80,7 @@ func NewB2Client(applicationKeyID string, applicationKey string, downloadURL str storageDir = storageDir[1:] } - if storageDir != "" && storageDir[len(storageDir) - 1] != '/' { + if storageDir != "" && storageDir[len(storageDir)-1] != '/' { storageDir += "/" } @@ -128,7 +127,7 @@ func (client *B2Client) retry(retries int, response *http.Response) int { } } - if retries >= client.MaximumRetries + 1 { + if retries >= client.MaximumRetries+1 { return 0 } retries++ @@ -143,7 +142,7 @@ func (client *B2Client) retry(retries int, response *http.Response) int { } func (client *B2Client) call(threadIndex int, requestURL string, method string, requestHeaders map[string]string, input interface{}) ( - io.ReadCloser, http.Header, int64, error) { + io.ReadCloser, http.Header, int64, error) { var response *http.Response @@ -171,7 +170,6 @@ func (client *B2Client) call(threadIndex int, requestURL string, method string, inputReader = rateLimitedReader } - if isUpload { if client.UploadURLs[threadIndex] == "" || client.UploadTokens[threadIndex] == "" { err := client.getUploadURL(threadIndex) @@ -303,7 +301,7 @@ func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bo defer client.Lock.Unlock() // Don't authorize if the previous one was done less than 30 seconds ago - 
if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix() - 30 { + if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix()-30 { return nil, false } @@ -426,7 +424,7 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions" } else if singleFile { // handle a single file with no versions as a special case to download the last byte of the file - apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName) + apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir+startFileName) // requesting byte -1 works for empty files where 0-0 fails with a 416 error requestHeaders["Range"] = "bytes=-1" // HEAD request @@ -500,7 +498,7 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin return nil, err } - ioutil.ReadAll(readCloser) + io.ReadAll(readCloser) for _, file := range output.Files { file.FileName = file.FileName[len(client.StorageDir):] @@ -585,7 +583,7 @@ func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID strin func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) { if !strings.HasSuffix(filePath, ".fsl") { - url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath) + url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir+filePath) readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0) return readCloser, len, err diff --git a/src/duplicacy_benchmark.go b/src/duplicacy_benchmark.go index ffe46f53..85080d39 100644 --- a/src/duplicacy_benchmark.go +++ b/src/duplicacy_benchmark.go @@ -10,7 +10,6 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -113,7 +112,7 @@ func Benchmark(localDirectory string, storage Storage, fileSize int64, chunkSize startTime := float64(time.Now().UnixNano()) / 1e9 LOG_INFO("BENCHMARK_WRITE", "Writing random data to local disk") - err = ioutil.WriteFile(filename, data, 0600) + err = os.WriteFile(filename, data, 0600) if err != nil { LOG_ERROR("BENCHMARK_WRITE", "Failed to write the random data: %v", err) return false diff --git a/src/duplicacy_config.go b/src/duplicacy_config.go index 24b4386f..f88dc2fd 100644 --- a/src/duplicacy_config.go +++ b/src/duplicacy_config.go @@ -8,8 +8,8 @@ import ( "bytes" "crypto/hmac" "crypto/rand" - "crypto/sha256" "crypto/rsa" + "crypto/sha256" "crypto/x509" "encoding/binary" "encoding/hex" @@ -18,12 +18,11 @@ import ( "fmt" "hash" "os" - "strings" + "reflect" "runtime" "runtime/debug" + "strings" "sync/atomic" - "io/ioutil" - "reflect" blake2 "github.com/minio/blake2b-simd" ) @@ -41,8 +40,8 @@ var ZSTD_COMPRESSION_LEVEL_DEFAULT = 201 var ZSTD_COMPRESSION_LEVEL_BETTER = 202 var ZSTD_COMPRESSION_LEVEL_BEST = 203 -var ZSTD_COMPRESSION_LEVELS = map[string]int { - "fastest": ZSTD_COMPRESSION_LEVEL_FASTEST, +var ZSTD_COMPRESSION_LEVELS = map[string]int{ + "fastest": ZSTD_COMPRESSION_LEVEL_FASTEST, "default": ZSTD_COMPRESSION_LEVEL_DEFAULT, "better": ZSTD_COMPRESSION_LEVEL_BETTER, "best": ZSTD_COMPRESSION_LEVEL_BEST, @@ -85,12 +84,12 @@ type Config struct { FileKey []byte `json:"-"` // for erasure coding - DataShards int `json:'data-shards'` + DataShards int `json:'data-shards'` ParityShards int `json:'parity-shards'` // for
RSA encryption rsaPrivateKey *rsa.PrivateKey - rsaPublicKey *rsa.PublicKey + rsaPublicKey *rsa.PublicKey chunkPool chan *Chunk numberOfChunks int32 @@ -102,17 +101,17 @@ type aliasedConfig Config type jsonableConfig struct { *aliasedConfig - ChunkSeed string `json:"chunk-seed"` - HashKey string `json:"hash-key"` - IDKey string `json:"id-key"` - ChunkKey string `json:"chunk-key"` - FileKey string `json:"file-key"` + ChunkSeed string `json:"chunk-seed"` + HashKey string `json:"hash-key"` + IDKey string `json:"id-key"` + ChunkKey string `json:"chunk-key"` + FileKey string `json:"file-key"` RSAPublicKey string `json:"rsa-public-key"` } func (config *Config) MarshalJSON() ([]byte, error) { - publicKey := []byte {} + publicKey := []byte{} if config.rsaPublicKey != nil { publicKey, _ = x509.MarshalPKIXPublicKey(config.rsaPublicKey) } @@ -596,7 +595,7 @@ func (config *Config) loadRSAPublicKey(keyFile string) { // keyFile may be the actually key, in which case we don't need to read from a file if !strings.Contains(keyFile, "-----BEGIN") { - encodedKey, err = ioutil.ReadFile(keyFile) + encodedKey, err = os.ReadFile(keyFile) if err != nil { LOG_ERROR("BACKUP_KEY", "Failed to read the public key file: %v", err) return @@ -641,7 +640,7 @@ func (config *Config) loadRSAPrivateKey(keyFile string, passphrase string) { // keyFile may be the actually key, in which case we don't need to read from a file if !strings.Contains(keyFile, "-----BEGIN") { - encodedKey, err = ioutil.ReadFile(keyFile) + encodedKey, err = os.ReadFile(keyFile) if err != nil { LOG_ERROR("RSA_PRIVATE", "Failed to read the private key file: %v", err) return diff --git a/src/duplicacy_dropboxstorage.go b/src/duplicacy_dropboxstorage.go index a7a1e985..77b21f81 100644 --- a/src/duplicacy_dropboxstorage.go +++ b/src/duplicacy_dropboxstorage.go @@ -6,7 +6,7 @@ package duplicacy import ( "fmt" - "io/ioutil" + "io" "strings" "github.com/gilbertchen/go-dropbox" @@ -200,7 +200,7 @@ func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, ch } defer output.Body.Close() - defer ioutil.ReadAll(output.Body) + defer io.ReadAll(output.Body) _, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients)) return err diff --git a/src/duplicacy_entry.go b/src/duplicacy_entry.go index fa56b865..46ed16e8 100644 --- a/src/duplicacy_entry.go +++ b/src/duplicacy_entry.go @@ -4,10 +4,11 @@ package duplicacy import ( + "bytes" + "crypto/sha256" "encoding/base64" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -16,11 +17,8 @@ import ( "strconv" "strings" "time" - "bytes" - "crypto/sha256" - - "github.com/vmihailenco/msgpack" + "github.com/vmihailenco/msgpack" ) // This is the hidden directory in the repository for storing various files. 
@@ -110,10 +108,10 @@ func (entry *Entry) Copy() *Entry { UID: entry.UID, GID: entry.GID, - StartChunk: entry.StartChunk, + StartChunk: entry.StartChunk, StartOffset: entry.StartOffset, - EndChunk: entry.EndChunk, - EndOffset: entry.EndOffset, + EndChunk: entry.EndChunk, + EndOffset: entry.EndOffset, Attributes: entry.Attributes, } @@ -362,12 +360,12 @@ func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error { if entry.Attributes != nil { attributes := make([]string, numberOfAttributes) - i := 0 - for attribute := range *entry.Attributes { - attributes[i] = attribute - i++ - } - sort.Strings(attributes) + i := 0 + for attribute := range *entry.Attributes { + attributes[i] = attribute + i++ + } + sort.Strings(attributes) for _, attribute := range attributes { err = encoder.EncodeString(attribute) if err != nil { @@ -380,7 +378,7 @@ func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error { } } - return nil + return nil } func (entry *Entry) DecodeMsgpack(decoder *msgpack.Decoder) error { @@ -498,8 +496,8 @@ func (entry *Entry) GetPermissions() os.FileMode { func (entry *Entry) GetParent() string { path := entry.Path - if path != "" && path[len(path) - 1] == '/' { - path = path[:len(path) - 1] + if path != "" && path[len(path)-1] == '/' { + path = path[:len(path)-1] } i := strings.LastIndex(path, "/") if i == -1 { @@ -596,7 +594,7 @@ func ComparePaths(left string, right string) int { for i := p; i < len(left); i++ { c3 = left[i] if c3 == '/' { - last1 = i == len(left) - 1 + last1 = i == len(left)-1 break } } @@ -606,7 +604,7 @@ func ComparePaths(left string, right string) int { for i := p; i < len(right); i++ { c4 = right[i] if c4 == '/' { - last2 = i == len(right) - 1 + last2 = i == len(right)-1 break } } @@ -703,14 +701,24 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string fullPath := joinPath(top, path) - files := make([]os.FileInfo, 0, 1024) + dirEntries := make([]os.DirEntry, 0, 1024) - files, err = ioutil.ReadDir(fullPath) + dirEntries, err = os.ReadDir(fullPath) if err != nil { return directoryList, nil, err } - // This binary search works because ioutil.ReadDir returns files sorted by Name() by default + // Map os.DirEntry to os.FileInfo + files := make([]os.FileInfo, 0, 1024) + for _, file := range dirEntries { + fileInfo, err := file.Info() + if err != nil { + return directoryList, nil, err + } + files = append(files, fileInfo) + } + + // This binary search works because os.ReadDir returns files sorted by Name() by default if nobackupFile != "" { ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0 }) if ii < len(files) && files[ii].Name() == nobackupFile { diff --git a/src/duplicacy_entry_test.go b/src/duplicacy_entry_test.go index 670a16b3..e5214609 100644 --- a/src/duplicacy_entry_test.go +++ b/src/duplicacy_entry_test.go @@ -5,7 +5,8 @@ package duplicacy import ( - "io/ioutil" + "bytes" + "encoding/json" "math/rand" "os" "path/filepath" @@ -13,11 +14,9 @@ import ( "sort" "strings" "testing" - "bytes" - "encoding/json" "github.com/gilbertchen/xattr" - "github.com/vmihailenco/msgpack" + "github.com/vmihailenco/msgpack" ) func TestEntrySort(t *testing.T) { @@ -165,7 +164,7 @@ func TestEntryOrder(t *testing.T) { continue } - err := ioutil.WriteFile(fullPath, []byte(file), 0700) + err := os.WriteFile(fullPath, []byte(file), 0700) if err != nil { t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err) } @@ -175,7 +174,7 @@ func TestEntryOrder(t *testing.T) { directories 
= append(directories, CreateEntry("", 0, 0, 0)) entries := make([]*Entry, 0, 4) - entryChannel := make(chan *Entry, 1024) + entryChannel := make(chan *Entry, 1024) entries = append(entries, CreateEntry("", 0, 0, 0)) for len(directories) > 0 { @@ -264,7 +263,7 @@ func TestEntryExcludeByAttribute(t *testing.T) { continue } - err := ioutil.WriteFile(fullPath, []byte(file), 0700) + err := os.WriteFile(fullPath, []byte(file), 0700) if err != nil { t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err) } @@ -372,4 +371,4 @@ func TestEntryEncoding(t *testing.T) { t.Error("Decoded entry is different than the original one") } -} \ No newline at end of file +} diff --git a/src/duplicacy_filefabricstorage.go b/src/duplicacy_filefabricstorage.go index a9574afd..31794c50 100644 --- a/src/duplicacy_filefabricstorage.go +++ b/src/duplicacy_filefabricstorage.go @@ -1,5 +1,5 @@ // Copyright (c) Storage Made Easy. All rights reserved. -// +// // This storage backend is contributed by Storage Made Easy (https://storagemadeeasy.com/) to be used in // Duplicacy and its derivative works. // @@ -7,45 +7,44 @@ package duplicacy import ( - "io" - "fmt" - "time" - "sync" "bytes" + "encoding/xml" "errors" - "strings" - "net/url" - "net/http" + "fmt" + "io" "math/rand" - "io/ioutil" - "encoding/xml" - "path/filepath" "mime/multipart" + "net/http" + "net/url" + "path/filepath" + "strings" + "sync" + "time" ) // The XML element representing a file returned by the File Fabric server type FileFabricFile struct { - XMLName xml.Name - ID string `xml:"fi_id"` - Path string `xml:"path"` - Size int64 `xml:"fi_size"` - Type int `xml:"fi_type"` + XMLName xml.Name + ID string `xml:"fi_id"` + Path string `xml:"path"` + Size int64 `xml:"fi_size"` + Type int `xml:"fi_type"` } // The XML element representing a file list returned by the server type FileFabricFileList struct { - XMLName xml.Name `xml:"files"` - Files []FileFabricFile `xml:",any"` + XMLName xml.Name `xml:"files"` + Files []FileFabricFile `xml:",any"` } type FileFabricStorage struct { StorageBase - endpoint string // the server - authToken string // the authentication token - accessToken string // the access token (as returned by getTokenByAuthToken) - storageDir string // the path of the storage directory - storageDirID string // the id of 'storageDir' + endpoint string // the server + authToken string // the authentication token + accessToken string // the access token (as returned by getTokenByAuthToken) + storageDir string // the path of the storage directory + storageDirID string // the id of 'storageDir' client *http.Client // the default http client threads int // number of threads @@ -53,18 +52,18 @@ type FileFabricStorage struct { directoryCache map[string]string // stores ids for directories known to this backend directoryCacheLock sync.Mutex // lock for accessing directoryCache - isAuthorized bool - testMode bool + isAuthorized bool + testMode bool } var ( errFileFabricAuthorizationFailure = errors.New("Authentication failure") - errFileFabricDirectoryExists = errors.New("Directory exists") + errFileFabricDirectoryExists = errors.New("Directory exists") ) // The general server response type FileFabricResponse struct { - Status string `xml:"status"` + Status string `xml:"status"` Message string `xml:"statusmessage"` } @@ -128,26 +127,26 @@ func CreateFileFabricStorage(endpoint string, token string, storageDir string, t } // Retrieve the access token using an auth token -func (storage *FileFabricStorage) getAccessToken() (error) { +func (storage 
*FileFabricStorage) getAccessToken() error { - formData := url.Values { "authtoken": {storage.authToken},} + formData := url.Values{"authtoken": {storage.authToken}} readCloser, _, _, err := storage.sendRequest(0, http.MethodPost, storage.getAPIURL("getTokenByAuthToken"), nil, formData) if err != nil { return err } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(io.Discard, readCloser) - var output struct { + var output struct { FileFabricResponse - Token string `xml:"token"` - } + Token string `xml:"token"` + } - err = xml.NewDecoder(readCloser).Decode(&output) - if err != nil { - return err - } + err = xml.NewDecoder(readCloser).Decode(&output) + if err != nil { + return err + } err = checkFileFabricResponse(output.FileFabricResponse, "request the access token") if err != nil { @@ -155,7 +154,7 @@ func (storage *FileFabricStorage) getAccessToken() (error) { } storage.accessToken = output.Token - return nil + return nil } // Determine if we should retry based on the number of retries given by 'retry' and if so calculate the delay with exponential backoff @@ -171,13 +170,13 @@ func (storage *FileFabricStorage) shouldRetry(retry int, messageFormat string, m backoff = 60 } delay := rand.Intn(backoff*500) + backoff*500 - LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay) / 1000.0) + LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay)/1000.0) time.Sleep(time.Duration(delay) * time.Millisecond) return true } // Send a request to the server -func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) ( io.ReadCloser, http.Header, int64, error) { +func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) { var response *http.Response @@ -229,13 +228,13 @@ func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, re } defer response.Body.Close() - defer io.Copy(ioutil.Discard, response.Body) + defer io.Copy(io.Discard, response.Body) var output struct { - Status string `xml:"status"` + Status string `xml:"status"` Message string `xml:"statusmessage"` } - + err = xml.NewDecoder(response.Body).Decode(&output) if err != nil { if !storage.shouldRetry(retries, "[%d] %s %s returned an invalid response: %v", threadIndex, method, requestURL, err) { @@ -279,7 +278,7 @@ func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files lastID := "" for { - formData := url.Values { "marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid" : {dirID}} + formData := url.Values{"marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid": {dirID}} if dir == "snapshots/" { formData["includefolders"] = []string{"y"} } @@ -293,12 +292,12 @@ func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(io.Discard, readCloser) var output struct { FileFabricResponse - FileList FileFabricFileList `xml:"files"` - Truncated int `xml:"truncated"` + FileList FileFabricFileList `xml:"files"` + Truncated int `xml:"truncated"` } err = xml.NewDecoder(readCloser).Decode(&output) @@ -314,7 +313,7 @@ func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files if dir == "snapshots/" { for _, file := range 
output.FileList.Files { if file.Type == 1 { - files = append(files, file.Path + "/") + files = append(files, file.Path+"/") } lastID = file.ID } @@ -338,7 +337,7 @@ func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files // getFileInfo returns the information about the file or directory at 'filePath'. func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) (fileID string, isDir bool, size int64, err error) { - formData := url.Values { "path" : {storage.storageDir + filePath}} + formData := url.Values{"path": {storage.storageDir + filePath}} readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("checkPathExists"), nil, formData) if err != nil { @@ -346,12 +345,12 @@ func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(io.Discard, readCloser) var output struct { FileFabricResponse - File FileFabricFile `xml:"file"` - Exists string `xml:"exists"` + File FileFabricFile `xml:"file"` + Exists string `xml:"exists"` } err = xml.NewDecoder(readCloser).Decode(&output) @@ -371,7 +370,7 @@ func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) for filePath != "" && filePath[len(filePath)-1] == '/' { filePath = filePath[:len(filePath)-1] } - + storage.directoryCacheLock.Lock() storage.directoryCache[filePath] = output.File.ID storage.directoryCacheLock.Unlock() @@ -396,7 +395,7 @@ func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) ( return nil } - formData := url.Values { "fi_id" : {fileID}} + formData := url.Values{"fi_id": {fileID}} readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doDeleteFile"), nil, formData) if err != nil { @@ -404,7 +403,7 @@ func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) ( } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(io.Discard, readCloser) var output FileFabricResponse @@ -428,7 +427,7 @@ func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to stri return nil } - formData := url.Values { "fi_id" : {fileID}, "fi_name": {filepath.Base(to)},} + formData := url.Values{"fi_id": {fileID}, "fi_name": {filepath.Base(to)}} readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doRenameFile"), nil, formData) if err != nil { @@ -436,7 +435,7 @@ func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to stri } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(io.Discard, readCloser) var output FileFabricResponse @@ -449,7 +448,7 @@ func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to stri if err != nil { return err } - + return nil } @@ -473,7 +472,7 @@ func (storage *FileFabricStorage) createParentDirectory(threadIndex int, dir str parentID, err = storage.createDirectory(threadIndex, parent) if err != nil { if err == errFileFabricDirectoryExists { - var isDir bool + var isDir bool parentID, isDir, _, err = storage.getFileInfo(threadIndex, parent) if err != nil { return "", err @@ -503,7 +502,7 @@ func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) ( return "", err } - formData := url.Values { "fi_name": {filepath.Base(dir)}, "fi_pid" : {parentID}} + formData := url.Values{"fi_name": {filepath.Base(dir)}, "fi_pid": {parentID}} readCloser, _, _, err := 
storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doCreateNewFolder"), nil, formData) if err != nil { @@ -511,7 +510,7 @@ func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) ( } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(io.Discard, readCloser) var output struct { FileFabricResponse @@ -545,7 +544,7 @@ func (storage *FileFabricStorage) CreateDirectory(threadIndex int, dir string) ( // DownloadFile reads the file at 'filePath' into the chunk. func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) { - formData := url.Values { "fi_id" : {storage.storageDir + filePath}} + formData := url.Values{"fi_id": {storage.storageDir + filePath}} readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getFile"), nil, formData) if err != nil { @@ -553,7 +552,7 @@ func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(io.Discard, readCloser) _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads) return err } @@ -567,15 +566,15 @@ func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, c } fileName := filepath.Base(filePath) - requestBody := &bytes.Buffer{} - writer := multipart.NewWriter(requestBody) - part, _ := writer.CreateFormFile("file_1", fileName) - part.Write(content) + requestBody := &bytes.Buffer{} + writer := multipart.NewWriter(requestBody) + part, _ := writer.CreateFormFile("file_1", fileName) + part.Write(content) - writer.WriteField("file_name1", fileName) + writer.WriteField("file_name1", fileName) writer.WriteField("fi_pid", parentID) writer.WriteField("fi_structtype", "g") - writer.Close() + writer.Close() headers := make(map[string]string) headers["Content-Type"] = writer.FormDataContentType() @@ -584,7 +583,7 @@ func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, c readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doUploadFiles"), headers, rateLimitedReader) defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(io.Discard, readCloser) var output FileFabricResponse diff --git a/src/duplicacy_filestorage.go b/src/duplicacy_filestorage.go index fe9387bb..24589a13 100644 --- a/src/duplicacy_filestorage.go +++ b/src/duplicacy_filestorage.go @@ -7,7 +7,6 @@ package duplicacy import ( "fmt" "io" - "io/ioutil" "math/rand" "os" "path" @@ -69,7 +68,7 @@ func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []stri fullPath := path.Join(storage.storageDir, dir) - list, err := ioutil.ReadDir(fullPath) + list, err := os.ReadDir(fullPath) if err != nil { if os.IsNotExist(err) { return nil, nil, nil @@ -79,11 +78,15 @@ func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []stri for _, f := range list { name := f.Name() - if (f.IsDir() || f.Mode() & os.ModeSymlink != 0) && name[len(name)-1] != '/' { + fileInfo, err := f.Info() + if err != nil { + return nil, nil, err + } + if (f.IsDir() || fileInfo.Mode()&os.ModeSymlink != 0) && name[len(name)-1] != '/' { name += "/" } files = append(files, name) - sizes = append(sizes, f.Size()) + sizes = append(sizes, fileInfo.Size()) } return files, sizes, nil @@ -165,7 +168,7 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content return err } } else { - if 
!stat.IsDir() && stat.Mode() & os.ModeSymlink == 0 { + if !stat.IsDir() && stat.Mode()&os.ModeSymlink == 0 { return fmt.Errorf("The path %s is not a directory or symlink", dir) } } diff --git a/src/duplicacy_gcdstorage.go b/src/duplicacy_gcdstorage.go index d7fea7be..ca7950ed 100644 --- a/src/duplicacy_gcdstorage.go +++ b/src/duplicacy_gcdstorage.go @@ -8,11 +8,11 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" "net" "net/http" "net/url" + "os" "path" "strings" "sync" @@ -38,8 +38,8 @@ type GCDStorage struct { service *drive.Service idCache map[string]string // only directories are saved in this cache idCacheLock sync.Mutex - backoffs []int // desired backoff time in seconds for each thread - attempts []int // number of failed attempts since last success for each thread + backoffs []int // desired backoff time in seconds for each thread + attempts []int // number of failed attempts since last success for each thread driveID string // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive spaces string // 'appDataFolder' if scope is drive.appdata; 'drive' otherwise @@ -347,7 +347,7 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre ctx := context.Background() - description, err := ioutil.ReadFile(tokenFile) + description, err := os.ReadFile(tokenFile) if err != nil { return nil, err } @@ -382,7 +382,7 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre } if subject, ok := object["subject"]; ok { - config.Subject = subject.(string) + config.Subject = subject.(string) } tokenSource = config.TokenSource(ctx) @@ -442,7 +442,6 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre storage.attempts[i] = 0 } - if scope == drive.DriveAppdataScope { storage.spaces = "appDataFolder" storage.savePathID("", "appDataFolder") @@ -536,7 +535,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i } return files, nil, nil } else { - lock := sync.Mutex {} + lock := sync.Mutex{} allFiles := []string{} allSizes := []int64{} @@ -564,8 +563,8 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i LOG_DEBUG("GCD_STORAGE", "Listing %s; %d items returned", parent, len(entries)) - files := []string {} - sizes := []int64 {} + files := []string{} + sizes := []int64{} for _, entry := range entries { if entry.MimeType != GCDDirectoryMimeType { name := entry.Name @@ -579,7 +578,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i files = append(files, name) sizes = append(sizes, entry.Size) } else { - directoryChannel <- parent+"/"+entry.Name + directoryChannel <- parent + "/" + entry.Name storage.savePathID(parent+"/"+entry.Name, entry.Id) } } @@ -588,14 +587,14 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i allSizes = append(allSizes, sizes...) 
lock.Unlock() directoryChannel <- "" - } (parent) + }(parent) } if activeWorkers > 0 { select { - case err := <- errorChannel: + case err := <-errorChannel: return nil, nil, err - case directory := <- directoryChannel: + case directory := <-directoryChannel: if directory == "" { activeWorkers-- } else { diff --git a/src/duplicacy_gcsstorage.go b/src/duplicacy_gcsstorage.go index fc280926..ce490d60 100644 --- a/src/duplicacy_gcsstorage.go +++ b/src/duplicacy_gcsstorage.go @@ -8,10 +8,10 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" "net" "net/url" + "os" "time" gcs "cloud.google.com/go/storage" @@ -45,7 +45,7 @@ func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, th ctx := context.Background() - description, err := ioutil.ReadFile(tokenFile) + description, err := os.ReadFile(tokenFile) if err != nil { return nil, err } diff --git a/src/duplicacy_hubicclient.go b/src/duplicacy_hubicclient.go index b270ada4..97254d2b 100644 --- a/src/duplicacy_hubicclient.go +++ b/src/duplicacy_hubicclient.go @@ -9,11 +9,11 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" "net" "net/http" net_url "net/url" + "os" "strings" "sync" "time" @@ -54,7 +54,7 @@ type HubicClient struct { func NewHubicClient(tokenFile string) (*HubicClient, error) { - description, err := ioutil.ReadFile(tokenFile) + description, err := os.ReadFile(tokenFile) if err != nil { return nil, err } @@ -240,7 +240,7 @@ func (client *HubicClient) RefreshToken(force bool) (err error) { return err } - err = ioutil.WriteFile(client.TokenFile, description, 0644) + err = os.WriteFile(client.TokenFile, description, 0644) if err != nil { return err } diff --git a/src/duplicacy_keyring_windows.go b/src/duplicacy_keyring_windows.go index cba4c50e..fa75282c 100644 --- a/src/duplicacy_keyring_windows.go +++ b/src/duplicacy_keyring_windows.go @@ -6,7 +6,7 @@ import ( "encoding/json" - "io/ioutil" + "os" "syscall" "unsafe" ) @@ -86,7 +86,7 @@ func keyringGet(key string) (value string) { return "" } - description, err := ioutil.ReadFile(keyringFile) + description, err := os.ReadFile(keyringFile) if err != nil { LOG_DEBUG("KEYRING_READ", "Keyring file not read: %v", err) return "" } @@ -125,7 +125,7 @@ func keyringSet(key string, value string) bool { keyring := make(map[string][]byte) - description, err := ioutil.ReadFile(keyringFile) + description, err := os.ReadFile(keyringFile) if err == nil { err = json.Unmarshal(description, &keyring) if err != nil { @@ -160,7 +160,7 @@ func keyringSet(key string, value string) bool { return false } - err = ioutil.WriteFile(keyringFile, description, 0600) + err = os.WriteFile(keyringFile, description, 0600) if err != nil { LOG_DEBUG("KEYRING_WRITE", "Failed to save the keyring storage to file %s: %v", keyringFile, err) return false diff --git a/src/duplicacy_oneclient.go b/src/duplicacy_oneclient.go index 0f25bb91..268dcc76 100644 --- a/src/duplicacy_oneclient.go +++ b/src/duplicacy_oneclient.go @@ -5,19 +5,19 @@ package duplicacy import ( - "context" "bytes" + "context" "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" "net/http" - "strings" + "os" + "path/filepath" "strconv" + "strings" "sync" "time" - "path/filepath" "golang.org/x/oauth2" ) @@ -46,14 +46,14 @@ type OneDriveClient struct { IsConnected bool TestMode bool - IsBusiness bool + IsBusiness bool RefreshTokenURL string - APIURL string + APIURL string } func NewOneDriveClient(tokenFile string, isBusiness bool, client_id string, client_secret string, drive_id string) (*OneDriveClient,
error) { - description, err := ioutil.ReadFile(tokenFile) + description, err := os.ReadFile(tokenFile) if err != nil { return nil, err } @@ -67,21 +67,21 @@ func NewOneDriveClient(tokenFile string, isBusiness bool, client_id string, clie HTTPClient: http.DefaultClient, TokenFile: tokenFile, Token: token, - OAConfig: nil, + OAConfig: nil, TokenLock: &sync.Mutex{}, IsBusiness: isBusiness, } - if (client_id != "") { - oneOauthConfig := oauth2.Config{ - ClientID: client_id, - ClientSecret: client_secret, - Scopes: []string{"Files.ReadWrite", "offline_access"}, - Endpoint: oauth2.Endpoint{ - AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize", - TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token", - }, - } + if client_id != "" { + oneOauthConfig := oauth2.Config{ + ClientID: client_id, + ClientSecret: client_secret, + Scopes: []string{"Files.ReadWrite", "offline_access"}, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize", + TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token", + }, + } client.OAConfig = &oneOauthConfig } @@ -90,7 +90,7 @@ func NewOneDriveClient(tokenFile string, isBusiness bool, client_id string, clie client.RefreshTokenURL = "https://duplicacy.com/odb_refresh" client.APIURL = "https://graph.microsoft.com/v1.0/me/drive" if drive_id != "" { - client.APIURL = "https://graph.microsoft.com/v1.0/drives/"+drive_id + client.APIURL = "https://graph.microsoft.com/v1.0/drives/" + drive_id } } else { client.RefreshTokenURL = "https://duplicacy.com/one_refresh" @@ -138,7 +138,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{}, if reader, ok := inputReader.(*RateLimitedReader); ok { request.ContentLength = reader.Length() - request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length() - 1, reader.Length())) + request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length()-1, reader.Length())) } if url != client.RefreshTokenURL { @@ -202,10 +202,10 @@ func (client *OneDriveClient) call(url string, method string, input interface{}, } else if response.StatusCode == 409 { return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Conflict"} } else if response.StatusCode > 401 && response.StatusCode != 404 { - delay := int((rand.Float32() * 0.5 + 0.5) * 1000.0 * float32(backoff)) + delay := int((rand.Float32()*0.5 + 0.5) * 1000.0 * float32(backoff)) if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 { retryAfter, _ := strconv.Atoi(backoffList[0]) - if retryAfter * 1000 > delay { + if retryAfter*1000 > delay { delay = retryAfter * 1000 } } @@ -238,7 +238,7 @@ func (client *OneDriveClient) RefreshToken(force bool) (err error) { return nil } - if (client.OAConfig == nil) { + if client.OAConfig == nil { readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "") if err != nil { return fmt.Errorf("failed to refresh the access token: %v", err) @@ -252,11 +252,11 @@ func (client *OneDriveClient) RefreshToken(force bool) (err error) { } else { ctx := context.Background() tokenSource := client.OAConfig.TokenSource(ctx, client.Token) - token, err := tokenSource.Token() - if err != nil { - return fmt.Errorf("failed to refresh the access token: %v", err) - } - client.Token = token + token, err := tokenSource.Token() + if err != nil { + return fmt.Errorf("failed to refresh the access token: %v", err) + } + client.Token = token } description, err := 
json.Marshal(client.Token) @@ -264,7 +264,7 @@ func (client *OneDriveClient) RefreshToken(force bool) (err error) { return err } - err = ioutil.WriteFile(client.TokenFile, description, 0644) + err = os.WriteFile(client.TokenFile, description, 0644) if err != nil { return err } @@ -327,7 +327,9 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error) func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) { url := client.APIURL + "/root:/" + path - if path == "" { url = client.APIURL + "/root" } + if path == "" { + url = client.APIURL + "/root" + } url += "?select=id,name,size,folder" readCloser, _, err := client.call(url, "GET", 0, "") @@ -361,7 +363,7 @@ func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit // Upload file using the simple method; this is only possible for OneDrive Personal or if the file // is smaller than 4MB for OneDrive Business - if !client.IsBusiness || (client.TestMode && rand.Int() % 2 == 0) { + if !client.IsBusiness || (client.TestMode && rand.Int()%2 == 0) { url := client.APIURL + "/root:/" + path + ":/content" readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream") @@ -386,17 +388,17 @@ func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string type CreateUploadSessionItem struct { ConflictBehavior string `json:"@microsoft.graph.conflictBehavior"` - Name string `json:"name"` + Name string `json:"name"` } - input := map[string]interface{} { - "item": CreateUploadSessionItem { + input := map[string]interface{}{ + "item": CreateUploadSessionItem{ ConflictBehavior: "replace", - Name: filepath.Base(path), + Name: filepath.Base(path), }, } - readCloser, _, err := client.call(client.APIURL + "/root:/" + path + ":/createUploadSession", "POST", input, "application/json") + readCloser, _, err := client.call(client.APIURL+"/root:/"+path+":/createUploadSession", "POST", input, "application/json") if err != nil { return "", err } diff --git a/src/duplicacy_preference.go b/src/duplicacy_preference.go index fe24b220..15104034 100644 --- a/src/duplicacy_preference.go +++ b/src/duplicacy_preference.go @@ -6,7 +6,6 @@ package duplicacy import ( "encoding/json" - "io/ioutil" "os" "path" "reflect" @@ -15,18 +14,18 @@ import ( // Preference stores options for each storage. 
type Preference struct { - Name string `json:"name"` - SnapshotID string `json:"id"` - RepositoryPath string `json:"repository"` - StorageURL string `json:"storage"` - Encrypted bool `json:"encrypted"` - BackupProhibited bool `json:"no_backup"` - RestoreProhibited bool `json:"no_restore"` - DoNotSavePassword bool `json:"no_save_password"` - NobackupFile string `json:"nobackup_file"` - Keys map[string]string `json:"keys"` - FiltersFile string `json:"filters"` - ExcludeByAttribute bool `json:"exclude_by_attribute"` + Name string `json:"name"` + SnapshotID string `json:"id"` + RepositoryPath string `json:"repository"` + StorageURL string `json:"storage"` + Encrypted bool `json:"encrypted"` + BackupProhibited bool `json:"no_backup"` + RestoreProhibited bool `json:"no_restore"` + DoNotSavePassword bool `json:"no_save_password"` + NobackupFile string `json:"nobackup_file"` + Keys map[string]string `json:"keys"` + FiltersFile string `json:"filters"` + ExcludeByAttribute bool `json:"exclude_by_attribute"` } var preferencePath string @@ -43,7 +42,7 @@ func LoadPreferences(repository string) bool { } if !stat.IsDir() { - content, err := ioutil.ReadFile(preferencePath) + content, err := os.ReadFile(preferencePath) if err != nil { LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to locate the preference path: %v", err) return false @@ -61,7 +60,7 @@ func LoadPreferences(repository string) bool { preferencePath = realPreferencePath } - description, err := ioutil.ReadFile(path.Join(preferencePath, "preferences")) + description, err := os.ReadFile(path.Join(preferencePath, "preferences")) if err != nil { LOG_ERROR("PREFERENCE_OPEN", "Failed to read the preference file from repository %s: %v", repository, err) return false @@ -110,7 +109,7 @@ func SavePreferences() bool { } preferenceFile := path.Join(GetDuplicacyPreferencePath(), "preferences") - err = ioutil.WriteFile(preferenceFile, description, 0600) + err = os.WriteFile(preferenceFile, description, 0600) if err != nil { LOG_ERROR("PREFERENCE_WRITE", "Failed to save the preference file %s: %v", preferenceFile, err) return false diff --git a/src/duplicacy_shadowcopy_darwin.go b/src/duplicacy_shadowcopy_darwin.go index 1d8a3b64..bed25923 100755 --- a/src/duplicacy_shadowcopy_darwin.go +++ b/src/duplicacy_shadowcopy_darwin.go @@ -10,7 +10,6 @@ package duplicacy import ( "context" "errors" - "io/ioutil" "os" "os/exec" "regexp" @@ -136,7 +135,7 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow } // Create mount point - snapshotPath, err = ioutil.TempDir("/tmp/", "snp_") + snapshotPath, err = os.MkdirTemp("/tmp/", "snp_") if err != nil { LOG_ERROR("VSS_CREATE", "Failed to create temporary mount directory") return top @@ -163,7 +162,7 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow return top } snapshotName := "com.apple.TimeMachine." + snapshotDate - + snapshotNameRegex := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`) matched = snapshotNameRegex.FindStringSubmatch(tmutilOutput) if len(matched) > 0 { @@ -171,7 +170,7 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow } else { LOG_INFO("VSS_CREATE", "Can't find the snapshot name with 'tmutil listlocalsnapshots'; fallback to %s", snapshotName) } - + // Mount snapshot as readonly and hide from GUI i.e. 
Finder _, err = CommandWithTimeout(timeoutInSeconds, "/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/System/Volumes/Data", snapshotPath) diff --git a/src/duplicacy_snapshot.go b/src/duplicacy_snapshot.go index efac81d6..083e0752 100644 --- a/src/duplicacy_snapshot.go +++ b/src/duplicacy_snapshot.go @@ -9,15 +9,13 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "path/filepath" + "sort" "strings" "time" - "sort" - - "github.com/vmihailenco/msgpack" + "github.com/vmihailenco/msgpack" ) // Snapshot represents a backup of the repository. @@ -60,12 +58,12 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) { type DirectoryListing struct { directory string - files *[]Entry + files *[]Entry } func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string, - filtersFile string, excludeByAttribute bool, listingChannel chan *Entry, - skippedDirectories *[]string, skippedFiles *[]string) { + filtersFile string, excludeByAttribute bool, listingChannel chan *Entry, + skippedDirectories *[]string, skippedFiles *[]string) { var patterns []string @@ -104,7 +102,7 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string, close(listingChannel) } -func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOperator, entryOut func(*Entry) bool) { +func (snapshot *Snapshot) ListRemoteFiles(config *Config, chunkOperator *ChunkOperator, entryOut func(*Entry) bool) { var chunks []string for _, chunkHash := range snapshot.FileSequence { @@ -124,12 +122,12 @@ func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOpe if chunk != nil { config.PutChunk(chunk) } - } () + }() // Normally if Version is 0 then the snapshot is created by CLI v2 but unfortunately CLI 3.0.1 does not set the // version bit correctly when copying old backups. So we need to check the first byte -- if it is '[' then it is // the old format. The new format starts with a string encoded in msgpack and the first byte can't be '['. - if snapshot.Version == 0 || reader.GetFirstByte() == '['{ + if snapshot.Version == 0 || reader.GetFirstByte() == '[' { LOG_INFO("SNAPSHOT_VERSION", "snapshot %s at revision %d is encoded in an old version format", snapshot.ID, snapshot.Revision) files := make([]*Entry, 0) decoder := json.NewDecoder(reader) @@ -200,7 +198,7 @@ func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOpe } else { LOG_ERROR("SNAPSHOT_VERSION", "snapshot %s at revision %d is encoded in unsupported version %d format", - snapshot.ID, snapshot.Revision, snapshot.Version) + snapshot.ID, snapshot.Revision, snapshot.Version) return } @@ -243,7 +241,7 @@ func ProcessFilterFile(patternFile string, includedFiles []string) (patterns []s } includedFiles = append(includedFiles, patternFile) LOG_INFO("SNAPSHOT_FILTER", "Parsing filter file %s", patternFile) - patternFileContent, err := ioutil.ReadFile(patternFile) + patternFileContent, err := os.ReadFile(patternFile) if err == nil { patternFileLines := strings.Split(string(patternFileContent), "\n") patterns = ProcessFilterLines(patternFileLines, includedFiles) @@ -263,7 +261,7 @@ func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patt if patternIncludeFile == "" { continue } - if ! 
filepath.IsAbs(patternIncludeFile) { + if !filepath.IsAbs(patternIncludeFile) { basePath := "" if len(includedFiles) == 0 { basePath, _ = os.Getwd() @@ -490,4 +488,3 @@ func encodeSequence(sequence []string) []string { return sequenceInHex } - diff --git a/src/duplicacy_snapshotmanager.go b/src/duplicacy_snapshotmanager.go index 0482512b..0abd3c6b 100644 --- a/src/duplicacy_snapshotmanager.go +++ b/src/duplicacy_snapshotmanager.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math" "os" "path" @@ -18,10 +17,10 @@ import ( "sort" "strconv" "strings" - "text/tabwriter" - "time" "sync" "sync/atomic" + "text/tabwriter" + "time" "github.com/aryann/difflib" ) @@ -191,7 +190,7 @@ type SnapshotManager struct { fileChunk *Chunk snapshotCache *FileStorage - chunkOperator *ChunkOperator + chunkOperator *ChunkOperator } // CreateSnapshotManager creates a snapshot manager @@ -407,7 +406,7 @@ func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, all continue } - description, err := ioutil.ReadFile(path.Join(manager.snapshotCache.storageDir, + description, err := os.ReadFile(path.Join(manager.snapshotCache.storageDir, "snapshots", snapshotFile)) if err != nil { LOG_WARN("SNAPSHOT_CACHE", "Failed to read the cached snapshot file: %v", err) @@ -738,7 +737,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList totalFileSize := int64(0) lastChunk := 0 - snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry)bool { + snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry) bool { if file.IsFile() { totalFiles++ totalFileSize += file.Size @@ -753,7 +752,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList return true }) - snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry)bool { + snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry) bool { if file.IsFile() { LOG_INFO("SNAPSHOT_FILE", "%s", file.String(maxSizeDigits)) } @@ -908,7 +907,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe _, exist, _, err := manager.storage.FindChunk(0, chunkID, false) if err != nil { LOG_WARN("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v", - chunkID, err) + chunkID, err) } else if exist { LOG_INFO("SNAPSHOT_VALIDATE", "Chunk %s is confirmed to exist", chunkID) continue @@ -1031,7 +1030,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe if err != nil { LOG_WARN("SNAPSHOT_VERIFY", "Failed to save the verified chunks file: %v", err) } else { - LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks) - numberOfVerifiedChunks) + LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks)-numberOfVerifiedChunks) } } } @@ -1073,7 +1072,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe defer CatchLogException() for { - chunkIndex, ok := <- chunkChannel + chunkIndex, ok := <-chunkChannel if !ok { wg.Done() return @@ -1093,14 +1092,14 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe elapsedTime := time.Now().Sub(startTime).Seconds() speed := int64(float64(downloadedChunkSize) / elapsedTime) - remainingTime := int64(float64(totalChunks - downloadedChunks) / float64(downloadedChunks) * elapsedTime) + remainingTime := int64(float64(totalChunks-downloadedChunks) / float64(downloadedChunks) * 
elapsedTime) percentage := float64(downloadedChunks) / float64(totalChunks) * 100.0 LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%", - chunkID, downloadedChunks, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage) + chunkID, downloadedChunks, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage) manager.config.PutChunk(chunk) } - } () + }() } for chunkIndex := range chunkHashes { @@ -1289,10 +1288,10 @@ func (manager *SnapshotManager) PrintSnapshot(snapshot *Snapshot) bool { } // Don't print the ending bracket - fmt.Printf("%s", string(description[:len(description) - 2])) + fmt.Printf("%s", string(description[:len(description)-2])) fmt.Printf(",\n \"files\": [\n") isFirstFile := true - snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (file *Entry) bool { + snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry) bool { fileDescription, _ := json.MarshalIndent(file.convertToObject(false), "", " ") @@ -1322,7 +1321,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool { } files := make([]*Entry, 0) - snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (file *Entry) bool { + snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry) bool { if file.IsFile() && file.Size != 0 { file.Attributes = nil files = append(files, file) @@ -1426,7 +1425,7 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, la func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) *Entry { var found *Entry - snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (entry *Entry) bool { + snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(entry *Entry) bool { if entry.Path == filePath { found = entry return false @@ -1479,8 +1478,8 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path file := manager.FindFile(snapshot, path, false) if !manager.RetrieveFile(snapshot, file, nil, func(chunk []byte) { - fmt.Printf("%s", chunk) - }) { + fmt.Printf("%s", chunk) + }) { LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d", path, snapshot.ID, snapshot.Revision) return false @@ -1500,7 +1499,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions [] defer func() { manager.chunkOperator.Stop() manager.chunkOperator = nil - } () + }() var leftSnapshot *Snapshot var rightSnapshot *Snapshot @@ -1517,10 +1516,10 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions [] go func() { defer CatchLogException() rightSnapshot.ListLocalFiles(top, nobackupFile, filtersFile, excludeByAttribute, localListingChannel, nil, nil) - } () + }() for entry := range localListingChannel { - entry.Attributes = nil // attributes are not compared + entry.Attributes = nil // attributes are not compared rightSnapshotFiles = append(rightSnapshotFiles, entry) } @@ -1564,7 +1563,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions [] } } else { var err error - rightFile, err = ioutil.ReadFile(joinPath(top, filePath)) + rightFile, err = os.ReadFile(joinPath(top, filePath)) if err != nil { LOG_ERROR("SNAPSHOT_DIFF", "Failed to read %s from the repository: %v", filePath, err) return false @@ -1725,7 +1724,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis defer func() { manager.chunkOperator.Stop() manager.chunkOperator = nil - 
} ()
+	}()
 
 	var err error
 
@@ -1821,15 +1820,16 @@ func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string
 
 // PruneSnapshots deletes snapshots by revisions, tags, or a retention policy. The main idea is two-step
 // fossil collection.
-// 1. Delete snapshots specified by revision, retention policy, with a tag. Find any resulting unreferenced
-// chunks, and mark them as fossils (by renaming). After that, create a fossil collection file containing
-// fossils collected during current run, and temporary files encountered. Also in the file is the latest
-// revision for each snapshot id. Save this file to a local directory.
 //
-// 2. On next run, check if there is any new revision for each snapshot. Or if the lastest revision is too
-// old, for instance, more than 7 days. This step is to identify snapshots that were being created while
-// step 1 is in progress. For each fossil reference by any of these snapshots, move them back to the
-// normal chunk directory.
+// 1. Delete snapshots specified by revision, retention policy, with a tag. Find any resulting unreferenced
+//    chunks, and mark them as fossils (by renaming). After that, create a fossil collection file containing
+//    fossils collected during the current run, and temporary files encountered. Also in the file is the latest
+//    revision for each snapshot id. Save this file to a local directory.
+//
+// 2. On the next run, check if there is any new revision for each snapshot, or if the latest revision is too
+//    old, for instance, more than 7 days. This step identifies snapshots that were being created while
+//    step 1 was in progress. For each fossil referenced by any of these snapshots, move it back to the
+//    normal chunk directory.
 //
 // Note that a snapshot being created when step 2 is in progress may reference a fossil. 
To avoid this // problem, never remove the lastest revision (unless exclusive is true), and only cache chunks referenced @@ -1853,7 +1853,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, defer func() { manager.chunkOperator.Stop() manager.chunkOperator = nil - } () + }() prefPath := GetDuplicacyPreferencePath() logDir := path.Join(prefPath, "logs") @@ -2544,7 +2544,7 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) { numberOfChunks, len(snapshot.ChunkLengths)) } - snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (entry *Entry) bool { + snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(entry *Entry) bool { if lastEntry != nil && lastEntry.Compare(entry) >= 0 && !strings.Contains(lastEntry.Path, "\ufffd") { err = fmt.Errorf("The entry %s appears before the entry %s", lastEntry.Path, entry.Path) @@ -2598,7 +2598,7 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) { if entry.Size != fileSize { err = fmt.Errorf("The file %s has a size of %d but the total size of chunks is %d", entry.Path, entry.Size, fileSize) - return false + return false } return true @@ -2647,7 +2647,7 @@ func (manager *SnapshotManager) DownloadFile(path string, derivationKey string) err = manager.storage.UploadFile(0, path, newChunk.GetBytes()) if err != nil { LOG_WARN("DOWNLOAD_REWRITE", "Failed to re-uploaded the file %s: %v", path, err) - } else{ + } else { LOG_INFO("DOWNLOAD_REWRITE", "The file %s has been re-uploaded", path) } } diff --git a/src/duplicacy_snapshotmanager_test.go b/src/duplicacy_snapshotmanager_test.go index 5cabb2ea..5152e172 100644 --- a/src/duplicacy_snapshotmanager_test.go +++ b/src/duplicacy_snapshotmanager_test.go @@ -9,7 +9,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "io/ioutil" "os" "path" "strings" @@ -650,7 +649,7 @@ func TestPruneGhostSnapshots(t *testing.T) { createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag") checkTestSnapshots(snapshotManager, 2, 0) - snapshot1, err := ioutil.ReadFile(path.Join(testDir, "snapshots", "vm1@host1", "1")) + snapshot1, err := os.ReadFile(path.Join(testDir, "snapshots", "vm1@host1", "1")) if err != nil { t.Errorf("Failed to read snapshot file: %v", err) } @@ -662,7 +661,7 @@ func TestPruneGhostSnapshots(t *testing.T) { // Recover the snapshot file for revision 1; this is to simulate a scenario where prune may encounter a network error after // leaving the fossil collection but before deleting any snapshots. 
- err = ioutil.WriteFile(path.Join(testDir, "snapshots", "vm1@host1", "1"), snapshot1, 0644) + err = os.WriteFile(path.Join(testDir, "snapshots", "vm1@host1", "1"), snapshot1, 0644) if err != nil { t.Errorf("Failed to write snapshot file: %v", err) } diff --git a/src/duplicacy_storage.go b/src/duplicacy_storage.go index c1bd90f2..f462106c 100644 --- a/src/duplicacy_storage.go +++ b/src/duplicacy_storage.go @@ -7,7 +7,7 @@ package duplicacy import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net" "os" "path" @@ -174,7 +174,7 @@ func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error { } defer file.Close() - content, err := ioutil.ReadAll(file) + content, err := io.ReadAll(file) if err != nil { return err } @@ -344,7 +344,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor LOG_INFO("SSH_PUBLICKEY", "No private key file is provided") } else { var content []byte - content, err = ioutil.ReadFile(keyFile) + content, err = os.ReadFile(keyFile) if err != nil { LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err) } else { @@ -371,7 +371,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor if stat, err := os.Stat(certFile); err == nil && !stat.IsDir() { LOG_DEBUG("SSH_CERTIFICATE", "Attempting to use ssh certificate from file %s", certFile) var content []byte - content, err = ioutil.ReadFile(certFile) + content, err = os.ReadFile(certFile) if err != nil { LOG_INFO("SSH_CERTIFICATE", "Failed to read ssh certificate file %s: %v", certFile, err) } else { @@ -628,7 +628,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor // Handle writing directly to the root of the drive // For gcd://driveid@/, driveid@ is match[3] not match[2] if matched[2] == "" && strings.HasSuffix(matched[3], "@") { - matched[2], matched[3] = matched[3], matched[2] + matched[2], matched[3] = matched[3], matched[2] } driveID := matched[2] if driveID != "" { @@ -648,7 +648,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor // Handle writing directly to the root of the drive // For odb://drive_id@/, drive_id@ is match[3] not match[2] if matched[2] == "" && strings.HasSuffix(matched[3], "@") { - matched[2], matched[3] = matched[3], matched[2] + matched[2], matched[3] = matched[3], matched[2] } drive_id := matched[2] if len(drive_id) > 0 { @@ -656,17 +656,17 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor } storagePath := matched[3] + matched[4] prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):") - tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword) + tokenFile := GetPassword(preference, matched[1]+"_token", prompt, true, resetPassword) // client_id, just like tokenFile, can be stored in preferences //prompt = fmt.Sprintf("Enter client_id for custom Azure app (if empty will use duplicacy.com one):") - client_id := GetPasswordFromPreference(preference, matched[1] + "_client_id") + client_id := GetPasswordFromPreference(preference, matched[1]+"_client_id") client_secret := "" if client_id != "" { // client_secret should go into keyring prompt = fmt.Sprintf("Enter client_secret for custom Azure app (if empty will use duplicacy.com one):") - client_secret = GetPassword(preference, matched[1] + "_client_secret", prompt, true, resetPassword) + client_secret = GetPassword(preference, matched[1]+"_client_secret", prompt, true, 
resetPassword) } oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads, client_id, client_secret, drive_id) @@ -675,9 +675,9 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor return nil } - SavePassword(preference, matched[1] + "_token", tokenFile) + SavePassword(preference, matched[1]+"_token", tokenFile) if client_id != "" { - SavePassword(preference, matched[1] + "_client_secret", client_secret) + SavePassword(preference, matched[1]+"_client_secret", client_secret) } return oneDriveStorage } else if matched[1] == "hubic" { @@ -746,7 +746,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor storageDir := "" index := strings.Index(bucket, "/") if index >= 0 { - storageDir = bucket[index + 1:] + storageDir = bucket[index+1:] bucket = bucket[:index] } apiKey := GetPassword(preference, "storj_key", "Enter the API access key:", true, resetPassword) @@ -795,7 +795,6 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor SavePassword(preference, "smb_password", password) return sambaStorage - } else { LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1]) return nil diff --git a/src/duplicacy_storage_test.go b/src/duplicacy_storage_test.go index 6f8b6388..a347445c 100644 --- a/src/duplicacy_storage_test.go +++ b/src/duplicacy_storage_test.go @@ -10,7 +10,6 @@ import ( "encoding/json" "flag" "fmt" - "io/ioutil" "os" "path" "runtime/debug" @@ -41,7 +40,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) { return storage, err } - description, err := ioutil.ReadFile("test_storage.conf") + description, err := os.ReadFile("test_storage.conf") if err != nil { return nil, err } diff --git a/src/duplicacy_utils_test.go b/src/duplicacy_utils_test.go index f53fa584..4658c562 100644 --- a/src/duplicacy_utils_test.go +++ b/src/duplicacy_utils_test.go @@ -7,7 +7,6 @@ package duplicacy import ( "bytes" "io" - "io/ioutil" "time" crypto_rand "crypto/rand" @@ -92,13 +91,13 @@ func TestMatchPattern(t *testing.T) { } } - for _, pattern := range []string{ "+", "-", "i:", "e:", "+a", "-a", "i:a", "e:a"} { + for _, pattern := range []string{"+", "-", "i:", "e:", "+a", "-a", "i:a", "e:a"} { if IsUnspecifiedFilter(pattern) { t.Errorf("pattern %s has a specified filter", pattern) } } - for _, pattern := range []string{ "i", "e", "ia", "ib", "a", "b"} { + for _, pattern := range []string{"i", "e", "ia", "ib", "a", "b"} { if !IsUnspecifiedFilter(pattern) { t.Errorf("pattern %s does not have a specified filter", pattern) } @@ -117,7 +116,7 @@ func TestRateLimit(t *testing.T) { rateLimiter := CreateRateLimitedReader(content, expectedRate) startTime := time.Now() - n, err := io.Copy(ioutil.Discard, rateLimiter) + n, err := io.Copy(io.Discard, rateLimiter) if err != nil { t.Errorf("Error reading from the rate limited reader: %v", err) return @@ -132,7 +131,7 @@ func TestRateLimit(t *testing.T) { t.Logf("Elapsed time: %s, actual rate: %.3f kB/s, expected rate: %d kB/s", elapsed, actualRate, expectedRate) startTime = time.Now() - n, err = RateLimitedCopy(ioutil.Discard, bytes.NewBuffer(content), expectedRate) + n, err = RateLimitedCopy(io.Discard, bytes.NewBuffer(content), expectedRate) if err != nil { t.Errorf("Error writing with rate limit: %v", err) return diff --git a/src/duplicacy_webdavstorage.go b/src/duplicacy_webdavstorage.go index 7777b17c..8a5536d6 100644 --- a/src/duplicacy_webdavstorage.go +++ b/src/duplicacy_webdavstorage.go 
@@ -2,10 +2,8 @@ // Free for personal use and commercial trial // Commercial use requires per-user licenses available from https://duplicacy.com // -// // This storage backend is based on the work by Yuri Karamani from https://github.com/karamani/webdavclnt, // released under the MIT license. -// package duplicacy import ( @@ -16,12 +14,13 @@ import ( "io" "math/rand" "net/http" + //"net/http/httputil" + "strconv" "strings" "sync" "time" - "io/ioutil" ) type WebDAVStorage struct { @@ -174,7 +173,7 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int, return response.Body, response.Header, nil } - io.Copy(ioutil.Discard, response.Body) + io.Copy(io.Discard, response.Body) response.Body.Close() if response.StatusCode == 301 { @@ -236,7 +235,7 @@ func (storage *WebDAVStorage) getProperties(uri string, depth int, properties .. return nil, err } defer readCloser.Close() - defer io.Copy(ioutil.Discard, readCloser) + defer io.Copy(io.Discard, readCloser) object := WebDAVMultiStatus{} err = xml.NewDecoder(readCloser).Decode(&object) @@ -323,7 +322,7 @@ func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []st // Add the directory to the directory cache storage.directoryCacheLock.Lock() - storage.directoryCache[dir + file] = 1 + storage.directoryCache[dir+file] = 1 storage.directoryCacheLock.Unlock() } @@ -350,8 +349,8 @@ func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exi m, exist := properties["/"+storage.storageDir+filePath] // If no properties exist for the given filePath, remove the trailing / from filePath and search again - if !exist && filePath != "" && filePath[len(filePath) - 1] == '/' { - m, exist = properties["/"+storage.storageDir+filePath[:len(filePath) - 1]] + if !exist && filePath != "" && filePath[len(filePath)-1] == '/' { + m, exist = properties["/"+storage.storageDir+filePath[:len(filePath)-1]] } if !exist { @@ -372,7 +371,7 @@ func (storage *WebDAVStorage) DeleteFile(threadIndex int, filePath string) (err if err != nil { return err } - io.Copy(ioutil.Discard, readCloser) + io.Copy(io.Discard, readCloser) readCloser.Close() return nil } @@ -383,7 +382,7 @@ func (storage *WebDAVStorage) MoveFile(threadIndex int, from string, to string) if err != nil { return err } - io.Copy(ioutil.Discard, readCloser) + io.Copy(io.Discard, readCloser) readCloser.Close() return nil } @@ -433,7 +432,7 @@ func (storage *WebDAVStorage) CreateDirectory(threadIndex int, dir string) (err } return err } - io.Copy(ioutil.Discard, readCloser) + io.Copy(io.Discard, readCloser) readCloser.Close() storage.directoryCacheLock.Lock() @@ -463,7 +462,7 @@ func (storage *WebDAVStorage) UploadFile(threadIndex int, filePath string, conte if err != nil { return err } - io.Copy(ioutil.Discard, readCloser) + io.Copy(io.Discard, readCloser) readCloser.Close() return nil }
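
A note on the io/ioutil removals that run through this patch: io/ioutil has been deprecated since Go 1.16, and every helper used here has a drop-in replacement with an identical signature in os or io. A minimal sketch of the mapping, using a placeholder file name:

package main

import (
	"bytes"
	"io"
	"log"
	"os"
)

func main() {
	// ioutil.WriteFile -> os.WriteFile (same signature, including the file mode)
	if err := os.WriteFile("example.txt", []byte("data"), 0644); err != nil {
		log.Fatal(err)
	}

	// ioutil.ReadFile -> os.ReadFile
	content, err := os.ReadFile("example.txt")
	if err != nil {
		log.Fatal(err)
	}

	// ioutil.ReadAll -> io.ReadAll
	all, err := io.ReadAll(bytes.NewReader(content))
	if err != nil {
		log.Fatal(err)
	}

	// ioutil.Discard -> io.Discard
	if _, err := io.Copy(io.Discard, bytes.NewReader(all)); err != nil {
		log.Fatal(err)
	}
}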
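
The snapshot-listing changes above all touch the same visitor-style API: ListRemoteFiles invokes a callback per entry, and the callback's boolean return decides whether the walk continues (FindFile, for example, returns false as soon as its path matches). The following is a sketch of that convention with a stand-in Entry type and a hypothetical listFiles helper, since the real types are not reproduced here:

package main

import "fmt"

// Entry is a stand-in for the snapshot file entry type.
type Entry struct{ Path string }

// listFiles walks entries and stops early when the visitor returns false,
// mirroring the ListRemoteFiles callback convention in this patch.
func listFiles(files []Entry, visit func(*Entry) bool) {
	for i := range files {
		if !visit(&files[i]) {
			return
		}
	}
}

func main() {
	files := []Entry{{"a"}, {"b"}, {"c"}}
	var found *Entry
	listFiles(files, func(e *Entry) bool {
		if e.Path == "b" {
			found = e
			return false // stop the walk once the file is found
		}
		return true
	})
	fmt.Println("found:", found.Path)
}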
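
The two-step fossil collection described in the PruneSnapshots comment can be outlined as follows. This is an illustrative sketch only, not the implementation: FossilCollection, stepOne, and stepTwo are hypothetical names, and the real logic also handles tags, retention policies, and exclusive mode.

package main

import "fmt"

// FossilCollection is a hypothetical record of one prune run: the chunks that
// were renamed to fossils, plus the latest revision seen for each snapshot ID.
type FossilCollection struct {
	Fossils         []string
	LatestRevisions map[string]int
}

// stepOne marks unreferenced chunks as fossils (a rename on the storage) and
// records the latest revision per snapshot ID for the collection file.
func stepOne(unreferenced []string, latest map[string]int) *FossilCollection {
	for _, chunkID := range unreferenced {
		fmt.Println("renaming chunk to fossil:", chunkID) // stand-in for a storage rename
	}
	return &FossilCollection{Fossils: unreferenced, LatestRevisions: latest}
}

// stepTwo, on a later run, deletes fossils only after every snapshot ID has
// produced a new revision, so no backup started during step one can still
// reference them; fossils referenced by such a backup are resurrected instead.
func stepTwo(c *FossilCollection, current map[string]int, referenced map[string]bool) {
	for id, rev := range current {
		if rev <= c.LatestRevisions[id] {
			fmt.Println("snapshot", id, "has no new revision; keeping fossils")
			return
		}
	}
	for _, fossil := range c.Fossils {
		if referenced[fossil] {
			fmt.Println("resurrecting fossil:", fossil) // move back to the chunk directory
		} else {
			fmt.Println("deleting fossil:", fossil)
		}
	}
}

func main() {
	c := stepOne([]string{"chunk-a", "chunk-b"}, map[string]int{"vm1@host1": 3})
	stepTwo(c, map[string]int{"vm1@host1": 4}, map[string]bool{"chunk-a": true})
}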
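
The repeated io.Copy(io.Discard, readCloser) calls in the WebDAV backend serve a purpose beyond the ioutil migration: fully draining a response body before closing it allows net/http to return the underlying connection to its keep-alive pool. The general pattern, with a placeholder URL:

package main

import (
	"io"
	"log"
	"net/http"
)

// drainAndClose consumes any unread bytes and closes the body, so the HTTP
// transport can reuse the underlying connection instead of discarding it.
func drainAndClose(body io.ReadCloser) {
	io.Copy(io.Discard, body)
	body.Close()
}

func main() {
	resp, err := http.Get("https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	drainAndClose(resp.Body)
}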