diff --git a/Glacier.md b/Glacier.md
new file mode 100644
index 00000000..e53f7f1d
--- /dev/null
+++ b/Glacier.md
@@ -0,0 +1,19 @@
+# Beta COLD Storage AKA Glacier support
+
+## Objective
+
+ * Back up directly to Glacier
+ * Restore directly from Glacier
+
+## Usage
+
+ * Add a new option "storageclass": "GLACIER" to your preference file
+ * Use a compatible S3CStorage driver. Currently only tested on Scaleway's Object Storage and C14 Glacier
+ * Data chunks go directly to Glacier, while snapshot chunks stay in Standard storage. You can, however, move snapshot chunks to Glacier as well
+
+## Caveats
+
+ * Corner cases are not tested
+ * When restoring, Duplicacy will move chunks from Glacier to Standard storage *ONE BY ONE*, which can be very slow. You can speed this up by moving all chunks to Standard at once
+ * You cannot move the config file or the snapshots folder to Glacier
+
diff --git a/README.md b/README.md
index 981c3d17..64278b89 100644
--- a/README.md
+++ b/README.md
@@ -44,6 +44,7 @@ Duplicacy currently provides the following storage backends:
 * WebDAV (under beta testing)
 * pcloud (via WebDAV)
 * Box.com (via WebDAV)
+* Scaleway with initial [Glacier](Glacier.md) support
 
 Please consult the [wiki page](https://github.com/gilbertchen/duplicacy/wiki/Storage-Backends) on how to set up Duplicacy to work with each cloud storage.
 
diff --git a/src/duplicacy_acdstorage.go b/src/duplicacy_acdstorage.go
index e5253737..ff542452 100644
--- a/src/duplicacy_acdstorage.go
+++ b/src/duplicacy_acdstorage.go
@@ -408,7 +408,7 @@ func (storage *ACDStorage) DownloadFile(threadIndex int, filePath string, chunk
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
-func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
+func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) {
 	parent := path.Dir(filePath)
 	if parent == "." {
 		parent = ""
diff --git a/src/duplicacy_azurestorage.go b/src/duplicacy_azurestorage.go
index e5f2413c..7c6ef48c 100644
--- a/src/duplicacy_azurestorage.go
+++ b/src/duplicacy_azurestorage.go
@@ -165,7 +165,7 @@ func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chun
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
-func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
+func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) {
 
 	tries := 0
diff --git a/src/duplicacy_b2storage.go b/src/duplicacy_b2storage.go
index 52a94578..97a56000 100644
--- a/src/duplicacy_b2storage.go
+++ b/src/duplicacy_b2storage.go
@@ -216,7 +216,7 @@ func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
-func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
+func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) {
 	return storage.client.UploadFile(threadIndex, filePath, content, storage.UploadRateLimit/storage.client.Threads)
 }
 
diff --git a/src/duplicacy_benchmark.go b/src/duplicacy_benchmark.go
index 0851c825..e34a660d 100644
--- a/src/duplicacy_benchmark.go
+++ b/src/duplicacy_benchmark.go
@@ -186,7 +186,7 @@ func Benchmark(localDirectory string, storage Storage, fileSize int64, chunkSize
 
 	startTime = float64(time.Now().UnixNano()) / 1e9
 	benchmarkRun(uploadThreads, chunkCount, func(threadIndex int, chunkIndex int) {
-		err := storage.UploadFile(threadIndex, fmt.Sprintf("benchmark/chunk%d", chunkIndex), chunks[chunkIndex])
+		err := storage.UploadFile(threadIndex, fmt.Sprintf("benchmark/chunk%d", chunkIndex), chunks[chunkIndex], nil)
 		if err != nil {
 			LOG_ERROR("BENCHMARK_UPLOAD", "Failed to upload the chunk: %v", err)
 			return
diff --git a/src/duplicacy_chunkdownloader.go b/src/duplicacy_chunkdownloader.go
index 468b2cf9..fa2923e7 100644
--- a/src/duplicacy_chunkdownloader.go
+++ b/src/duplicacy_chunkdownloader.go
@@ -8,6 +8,8 @@ import (
 	"io"
 	"sync/atomic"
 	"time"
+
+	"github.com/gilbertchen/goamz/s3"
 )
 
 // ChunkDownloadTask encapsulates information need to download a chunk.
@@ -268,7 +270,7 @@ func (downloader *ChunkDownloader) WaitForCompletion() {
 	}
 
 	// Looping until there isn't a download task in progress
-	for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex + 1 < len(downloader.taskList) {
+	for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex+1 < len(downloader.taskList) {
 
 		// Wait for a completion event first
 		if downloader.numberOfActiveChunks > 0 {
@@ -280,8 +282,8 @@ func (downloader *ChunkDownloader) WaitForCompletion() {
 		}
 
 		// Pass the tasks one by one to the download queue
-		if downloader.lastChunkIndex + 1 < len(downloader.taskList) {
-			task := &downloader.taskList[downloader.lastChunkIndex + 1]
+		if downloader.lastChunkIndex+1 < len(downloader.taskList) {
+			task := &downloader.taskList[downloader.lastChunkIndex+1]
 			if task.isDownloading {
 				downloader.lastChunkIndex++
 				continue
@@ -358,6 +360,7 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 	chunk.Reset(false)
 
 	const MaxDownloadAttempts = 3
+	const GlacierDelay = 1 // minutes to wait between Glacier restore polls
 
 	for downloadAttempt := 0; ; downloadAttempt++ {
 
 		// Find the chunk by ID first.
@@ -417,15 +420,35 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 			err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
 			if err != nil {
 				_, isHubic := downloader.storage.(*HubicStorage)
+				s3c, isArgo := downloader.storage.(*S3CStorage)
 				// Retry on EOF or if it is a Hubic backend as it may return 404 even when the chunk exists
 				if (err == io.ErrUnexpectedEOF || isHubic) && downloadAttempt < MaxDownloadAttempts {
 					LOG_WARN("DOWNLOAD_RETRY", "Failed to download the chunk %s: %v; retrying", chunkID, err)
 					chunk.Reset(false)
 					continue
-				} else {
-					LOG_ERROR("DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
-					return false
 				}
+				if s3err, ok := err.(*s3.Error); ok {
+					// A 403 from the S3C backend means the chunk is in Glacier and must be restored first
+					if s3err.StatusCode == 403 && isArgo {
+						if downloadAttempt == 0 {
+							LOG_DEBUG("TITANIC", "Requesting restore of %v from GLACIER", chunkID)
+							err = s3c.RestoreFile(threadIndex, chunkPath, 1) // XXX FIXME : hardcoded 1 day retention
+							if err != nil {
+								LOG_WARN("DOWNLOAD_RETRY", "Restore %v from GLACIER failed: %v", chunkID, err)
+								return false
+							}
+						}
+						// keep retrying for up to MaxDownloadAttempts * 3 attempts, waiting GlacierDelay minutes between attempts
+						if downloadAttempt <= MaxDownloadAttempts*3 {
+							LOG_WARN("DOWNLOAD_RETRY", "Unable to get chunk %s from GLACIER; retrying in %v minute(s)", chunkID, GlacierDelay)
+							chunk.Reset(false)
+							time.Sleep(GlacierDelay * time.Minute) // XXX : blocks this download thread while waiting
+							continue
+						}
+					}
+				}
+				LOG_ERROR("DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
+				return false
 			}
 
 			err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
@@ -457,7 +480,7 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 
 	if len(cachedPath) > 0 {
 		// Save a copy to the local snapshot cache
-		err := downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
+		err := downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes(), nil)
 		if err != nil {
 			LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
 		}
diff --git a/src/duplicacy_chunkuploader.go b/src/duplicacy_chunkuploader.go
index b983fe0d..1223247b 100644
--- a/src/duplicacy_chunkuploader.go
+++ b/src/duplicacy_chunkuploader.go
@@ -91,10 +91,20 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
 	chunk := task.chunk
 	chunkSize := chunk.GetLength()
 	chunkID := chunk.GetID()
+	var stropt StorageOption
 
 	// For a snapshot chunk, verify that its chunk id is correct
 	if uploader.snapshotCache != nil {
 		chunk.VerifyID()
+		// use the metadata storage options for snapshot (meta) chunks
+		if st, ok := uploader.storage.(*S3CStorage); ok {
+			stropt = st.stOptionMeta
+		}
+	} else {
+		// use the data storage options (possibly GLACIER) for data chunks
+		if st, ok := uploader.storage.(*S3CStorage); ok {
+			stropt = st.stOptionData
+		}
 	}
 
 	if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
@@ -104,7 +114,7 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
 			LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
 		} else if exist {
 			LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
-		} else if err = uploader.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
+		} else if err = uploader.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes(), stropt); err != nil {
 			LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
 		} else {
 			LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
@@ -135,7 +145,7 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
 	}
 
 	if !uploader.config.dryRun {
-		err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
+		err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes(), stropt)
 		if err != nil {
 			LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
 			return false
diff --git a/src/duplicacy_config.go b/src/duplicacy_config.go
index 6692fea6..63a558a1 100644
--- a/src/duplicacy_config.go
+++ b/src/duplicacy_config.go
@@ -8,8 +8,8 @@ import (
 	"bytes"
 	"crypto/hmac"
 	"crypto/rand"
-	"crypto/sha256"
 	"crypto/rsa"
+	"crypto/sha256"
 	"crypto/x509"
 	"encoding/binary"
 	"encoding/hex"
@@ -17,12 +17,12 @@ import (
 	"encoding/pem"
 	"fmt"
 	"hash"
+	"io/ioutil"
 	"os"
+	"reflect"
 	"runtime"
 	"runtime/debug"
 	"sync/atomic"
-	"io/ioutil"
-	"reflect"
 
 	blake2 "github.com/minio/blake2b-simd"
 )
@@ -72,7 +72,7 @@ type Config struct {
 
 	// for RSA encryption
 	rsaPrivateKey *rsa.PrivateKey
-	rsaPublicKey *rsa.PublicKey
+	rsaPublicKey  *rsa.PublicKey
 
 	chunkPool chan *Chunk
 	numberOfChunks int32
@@ -84,17 +84,17 @@ type aliasedConfig Config
 
 type jsonableConfig struct {
 	*aliasedConfig
-	ChunkSeed string `json:"chunk-seed"`
-	HashKey string `json:"hash-key"`
-	IDKey string `json:"id-key"`
-	ChunkKey string `json:"chunk-key"`
-	FileKey string `json:"file-key"`
+	ChunkSeed    string `json:"chunk-seed"`
+	HashKey      string `json:"hash-key"`
+	IDKey        string `json:"id-key"`
+	ChunkKey     string `json:"chunk-key"`
+	FileKey      string `json:"file-key"`
 	RSAPublicKey string `json:"rsa-public-key"`
 }
 
 func (config *Config) MarshalJSON() ([]byte, error) {
 
-	publicKey := []byte {}
+	publicKey := []byte{}
 	if config.rsaPublicKey != nil {
 		publicKey, _ = x509.MarshalPKIXPublicKey(config.rsaPublicKey)
 	}
@@ -504,7 +504,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
 		}
 	}
 
-	err = storage.UploadFile(0, "config", chunk.GetBytes())
+	err = storage.UploadFile(0, "config", chunk.GetBytes(), nil)
 	if err != nil {
 		LOG_ERROR("CONFIG_INIT", "Failed to configure the storage: %v", err)
 		return false
diff --git a/src/duplicacy_dropboxstorage.go b/src/duplicacy_dropboxstorage.go
index b8545cd7..da71aa17 100644
--- a/src/duplicacy_dropboxstorage.go
+++ b/src/duplicacy_dropboxstorage.go
@@ -208,7 +208,7 @@ func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, ch
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
-func (storage *DropboxStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) { +func (storage *DropboxStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) { if filePath != "" && filePath[0] != '/' { filePath = "/" + filePath } diff --git a/src/duplicacy_filestorage.go b/src/duplicacy_filestorage.go index 604cffa5..adfea499 100644 --- a/src/duplicacy_filestorage.go +++ b/src/duplicacy_filestorage.go @@ -147,7 +147,7 @@ func (storage *FileStorage) DownloadFile(threadIndex int, filePath string, chunk } // UploadFile writes 'content' to the file at 'filePath' -func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) { +func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) { fullPath := path.Join(storage.storageDir, filePath) diff --git a/src/duplicacy_gcdstorage.go b/src/duplicacy_gcdstorage.go index 02cc1b80..c15bf2de 100644 --- a/src/duplicacy_gcdstorage.go +++ b/src/duplicacy_gcdstorage.go @@ -38,8 +38,8 @@ type GCDStorage struct { service *drive.Service idCache map[string]string // only directories are saved in this cache idCacheLock sync.Mutex - backoffs []int // desired backoff time in seconds for each thread - attempts []int // number of failed attempts since last success for each thread + backoffs []int // desired backoff time in seconds for each thread + attempts []int // number of failed attempts since last success for each thread driveID string // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive createDirectoryLock sync.Mutex @@ -715,7 +715,7 @@ func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk } // UploadFile writes 'content' to the file at 'filePath'. -func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) { +func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) { // We never upload a fossil so there is no need to convert the path parent := path.Dir(filePath) diff --git a/src/duplicacy_gcsstorage.go b/src/duplicacy_gcsstorage.go index fc280926..ef754756 100644 --- a/src/duplicacy_gcsstorage.go +++ b/src/duplicacy_gcsstorage.go @@ -251,7 +251,7 @@ func (storage *GCSStorage) DownloadFile(threadIndex int, filePath string, chunk } // UploadFile writes 'content' to the file at 'filePath'. -func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) { +func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) { backoff := 1 for { diff --git a/src/duplicacy_hubicstorage.go b/src/duplicacy_hubicstorage.go index d5c66738..8eda0668 100644 --- a/src/duplicacy_hubicstorage.go +++ b/src/duplicacy_hubicstorage.go @@ -175,7 +175,7 @@ func (storage *HubicStorage) DownloadFile(threadIndex int, filePath string, chun } // UploadFile writes 'content' to the file at 'filePath'. 
-func (storage *HubicStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) { +func (storage *HubicStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) { return storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThreads) } diff --git a/src/duplicacy_onestorage.go b/src/duplicacy_onestorage.go index e6cb9c21..bcea10f5 100644 --- a/src/duplicacy_onestorage.go +++ b/src/duplicacy_onestorage.go @@ -216,7 +216,7 @@ func (storage *OneDriveStorage) DownloadFile(threadIndex int, filePath string, c } // UploadFile writes 'content' to the file at 'filePath'. -func (storage *OneDriveStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) { +func (storage *OneDriveStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) { err = storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThread) if e, ok := err.(OneDriveError); ok && e.Status == 409 { diff --git a/src/duplicacy_preference.go b/src/duplicacy_preference.go index 6ec3904e..d623eacf 100644 --- a/src/duplicacy_preference.go +++ b/src/duplicacy_preference.go @@ -25,7 +25,8 @@ type Preference struct { DoNotSavePassword bool `json:"no_save_password"` NobackupFile string `json:"nobackup_file"` Keys map[string]string `json:"keys"` - FiltersFile string `json:"filters"` + FiltersFile string `json:"filters"` + StorageClass string `json:"storageclass"` } var preferencePath string diff --git a/src/duplicacy_s3cstorage.go b/src/duplicacy_s3cstorage.go index 76014cc6..8d5b2cf4 100644 --- a/src/duplicacy_s3cstorage.go +++ b/src/duplicacy_s3cstorage.go @@ -17,6 +17,9 @@ type S3CStorage struct { buckets []*s3.Bucket storageDir string + + stOptionMeta s3.Options // Option to use for metadata + stOptionData s3.Options // Option to use for file data } // CreateS3CStorage creates a amazon s3 storage object. @@ -54,8 +57,10 @@ func CreateS3CStorage(regionName string, endpoint string, bucketName string, sto } storage = &S3CStorage{ - buckets: buckets, - storageDir: storageDir, + buckets: buckets, + storageDir: storageDir, + stOptionMeta: s3.Options{Tagging: "TYPE=META"}, + stOptionData: s3.Options{Tagging: "TYPE=DATA"}, } storage.DerivedStorage = storage @@ -172,13 +177,24 @@ func (storage *S3CStorage) DownloadFile(threadIndex int, filePath string, chunk } // UploadFile writes 'content' to the file at 'filePath'. 
-func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
+func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) {
 
-	options := s3.Options{}
+	var options s3.Options
+	if sto, ok := storageOption.(s3.Options); ok {
+		options = sto
+	}
+	LOG_DEBUG("S3C_OPTIONS", "s3cOption %+v", options)
 	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.buckets))
 	return storage.buckets[threadIndex].PutReader(storage.storageDir+filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
 }
 
+// RestoreFile requests a restore of 'filePath' from GLACIER to STANDARD storage, with a retention of 'days' days
+func (storage *S3CStorage) RestoreFile(threadIndex int, filePath string, days int) (err error) {
+
+	LOG_DEBUG("RestoreObject", "RestoreObject %v", filePath)
+	return storage.buckets[threadIndex].RestoreObject(storage.storageDir+filePath, days)
+}
+
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
 func (storage *S3CStorage) IsCacheNeeded() bool { return true }
diff --git a/src/duplicacy_s3storage.go b/src/duplicacy_s3storage.go
index f205d8f9..c30f1ef6 100644
--- a/src/duplicacy_s3storage.go
+++ b/src/duplicacy_s3storage.go
@@ -216,7 +216,7 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
-func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
+func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) {
 
 	attempts := 0
diff --git a/src/duplicacy_sftpstorage.go b/src/duplicacy_sftpstorage.go
index 06bcd507..f951ef3b 100644
--- a/src/duplicacy_sftpstorage.go
+++ b/src/duplicacy_sftpstorage.go
@@ -13,8 +13,8 @@ import (
 	"path"
 	"runtime"
 	"strings"
-	"time"
 	"sync"
+	"time"
 
 	"github.com/pkg/sftp"
 	"golang.org/x/crypto/ssh"
@@ -119,9 +119,9 @@ func (storage *SFTPStorage) getSFTPClient() *sftp.Client {
 	return storage.client
 }
 
-func (storage *SFTPStorage) retry(f func () error) error {
+func (storage *SFTPStorage) retry(f func() error) error {
 	delay := time.Second
-	for i := 0;; i++ {
+	for i := 0; ; i++ {
 		err := f()
 		if err != nil && strings.Contains(err.Error(), "EOF") && i < storage.numberOfTries {
 			LOG_WARN("SFTP_RETRY", "Encountered an error (%v); retry after %d second(s)", err, delay/time.Second)
@@ -150,6 +150,7 @@ func (storage *SFTPStorage) retry(f func () error) error {
 		return err
 	}
 }
+
 // ListFiles return the list of files and subdirectories under 'file' (non-recursively)
 func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []string, sizes []int64, err error) {
 
@@ -207,8 +208,10 @@ func (storage *SFTPStorage) MoveFile(threadIndex int, from string, to string) (e
 	if fileInfo != nil {
 		return fmt.Errorf("The destination file %s already exists", toPath)
 	}
-	err = storage.retry(func() error { return storage.getSFTPClient().Rename(path.Join(storage.storageDir, from),
-		path.Join(storage.storageDir, to)) })
+	err = storage.retry(func() error {
+		return storage.getSFTPClient().Rename(path.Join(storage.storageDir, from),
+			path.Join(storage.storageDir, to))
+	})
 	return err
 }
 
@@ -267,7 +270,7 @@ func (storage *SFTPStorage) DownloadFile(threadIndex int, filePath string, chunk
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
-func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) { +func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) { fullPath := path.Join(storage.storageDir, filePath) diff --git a/src/duplicacy_snapshotmanager.go b/src/duplicacy_snapshotmanager.go index 55053bfd..ade05a70 100644 --- a/src/duplicacy_snapshotmanager.go +++ b/src/duplicacy_snapshotmanager.go @@ -925,7 +925,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe _, exist, _, err := manager.storage.FindChunk(0, chunkID, false) if err != nil { LOG_WARN("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v", - chunkID, err) + chunkID, err) } else if exist { LOG_INFO("SNAPSHOT_VALIDATE", "Chunk %s is confirmed to exist", chunkID) continue @@ -1359,8 +1359,8 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path file := manager.FindFile(snapshot, path, false) if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { - fmt.Printf("%s", chunk) - }) { + fmt.Printf("%s", chunk) + }) { LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d", path, snapshot.ID, snapshot.Revision) return false @@ -2114,7 +2114,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, return false } - err = manager.snapshotCache.UploadFile(0, collectionFile, description) + err = manager.snapshotCache.UploadFile(0, collectionFile, description, nil) if err != nil { LOG_ERROR("FOSSIL_COLLECT", "Failed to save the fossil collection file %s: %v", collectionFile, err) return false @@ -2510,7 +2510,7 @@ func (manager *SnapshotManager) DownloadFile(path string, derivationKey string) return nil } - err = manager.snapshotCache.UploadFile(0, path, manager.fileChunk.GetBytes()) + err = manager.snapshotCache.UploadFile(0, path, manager.fileChunk.GetBytes(), nil) if err != nil { LOG_WARN("DOWNLOAD_FILE_CACHE", "Failed to add the file %s to the snapshot cache: %v", path, err) } @@ -2526,7 +2526,7 @@ func (manager *SnapshotManager) UploadFile(path string, derivationKey string, co manager.fileChunk.Write(content) if manager.storage.IsCacheNeeded() { - err := manager.snapshotCache.UploadFile(0, path, manager.fileChunk.GetBytes()) + err := manager.snapshotCache.UploadFile(0, path, manager.fileChunk.GetBytes(), nil) if err != nil { LOG_WARN("UPLOAD_CACHE", "Failed to cache the file %s: %v", path, err) } else { @@ -2544,7 +2544,7 @@ func (manager *SnapshotManager) UploadFile(path string, derivationKey string, co return false } - err = manager.storage.UploadFile(0, path, manager.fileChunk.GetBytes()) + err = manager.storage.UploadFile(0, path, manager.fileChunk.GetBytes(), nil) if err != nil { LOG_ERROR("UPLOAD_File", "Failed to upload the file %s: %v", path, err) return false diff --git a/src/duplicacy_storage.go b/src/duplicacy_storage.go index 33ed6c46..4b419e6e 100644 --- a/src/duplicacy_storage.go +++ b/src/duplicacy_storage.go @@ -46,7 +46,7 @@ type Storage interface { DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) // UploadFile writes 'content' to the file at 'filePath'. - UploadFile(threadIndex int, filePath string, content []byte) (err error) + UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) // SetNestingLevels sets up the chunk nesting structure. 
 	SetNestingLevels(config *Config)
@@ -71,6 +71,10 @@ type Storage interface {
 	SetRateLimits(downloadRateLimit int, uploadRateLimit int)
 }
 
+// StorageOption is a flattened option value passed through UploadFile; each backend inspects it for the concrete type it understands (e.g. s3.Options)
+type StorageOption interface {
+}
+
 // StorageBase is the base struct from which all storages are derived from
 type StorageBase struct {
 	DownloadRateLimit int // Maximum download rate (bytes/seconds)
@@ -80,6 +84,7 @@ type StorageBase struct {
 
 	readLevels []int // At which nesting level to find the chunk with the given id
 	writeLevel int   // Store the uploaded chunk to this level
+
 }
 
 // SetRateLimits sets the maximum download and upload rates
@@ -484,11 +489,13 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 		var err error
 		if matched[1] == "s3c" {
-			storage, err = CreateS3CStorage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
+			s3cstorage, err := CreateS3CStorage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
 			if err != nil {
 				LOG_ERROR("STORAGE_CREATE", "Failed to load the S3C storage at %s: %v", storageURL, err)
 				return nil
 			}
+			s3cstorage.stOptionData.StorageClass = preference.StorageClass
+			storage = s3cstorage
 		} else {
 			isMinioCompatible := (matched[1] == "minio" || matched[1] == "minios")
 			isSSLSupported := (matched[1] == "s3" || matched[1] == "minios")
@@ -627,7 +634,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 		// Handle writing directly to the root of the drive
 		// For gcd://driveid@/, driveid@ is match[3] not match[2]
 		if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
-			matched[2], matched[3] = matched[3], matched[2]
+			matched[2], matched[3] = matched[3], matched[2]
 		}
 		driveID := matched[2]
 		if driveID != "" {
@@ -646,13 +653,13 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 	} else if matched[1] == "one" || matched[1] == "odb" {
 		storagePath := matched[3] + matched[4]
 		prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
-		tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword)
+		tokenFile := GetPassword(preference, matched[1]+"_token", prompt, true, resetPassword)
 		oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
 			return nil
 		}
-		SavePassword(preference, matched[1] + "_token", tokenFile)
+		SavePassword(preference, matched[1]+"_token", tokenFile)
 		return oneDriveStorage
 	} else if matched[1] == "hubic" {
 		storagePath := matched[3] + matched[4]
diff --git a/src/duplicacy_swiftstorage.go b/src/duplicacy_swiftstorage.go
index 3d360571..6c93e80b 100644
--- a/src/duplicacy_swiftstorage.go
+++ b/src/duplicacy_swiftstorage.go
@@ -227,7 +227,7 @@ func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chun
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
-func (storage *SwiftStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) { +func (storage *SwiftStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) { reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.threads) _, err = storage.connection.ObjectPut(storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil) return err diff --git a/src/duplicacy_wasabistorage.go b/src/duplicacy_wasabistorage.go index 76f8b63a..32593427 100644 --- a/src/duplicacy_wasabistorage.go +++ b/src/duplicacy_wasabistorage.go @@ -165,9 +165,9 @@ func (storage *WasabiStorage) DownloadFile( } func (storage *WasabiStorage) UploadFile( - threadIndex int, filePath string, content []byte, + threadIndex int, filePath string, content []byte, storageOption StorageOption, ) (err error) { - return storage.s3.UploadFile(threadIndex, filePath, content) + return storage.s3.UploadFile(threadIndex, filePath, content, storageOption) } func (storage *WasabiStorage) IsCacheNeeded() bool { diff --git a/src/duplicacy_webdavstorage.go b/src/duplicacy_webdavstorage.go index 176145d5..50239e32 100644 --- a/src/duplicacy_webdavstorage.go +++ b/src/duplicacy_webdavstorage.go @@ -333,8 +333,8 @@ func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exi m, exist := properties["/"+storage.storageDir+filePath] // If no properties exist for the given filePath, remove the trailing / from filePath and search again - if !exist && filePath != "" && filePath[len(filePath) - 1] == '/' { - m, exist = properties["/"+storage.storageDir+filePath[:len(filePath) - 1]] + if !exist && filePath != "" && filePath[len(filePath)-1] == '/' { + m, exist = properties["/"+storage.storageDir+filePath[:len(filePath)-1]] } if !exist { @@ -432,7 +432,7 @@ func (storage *WebDAVStorage) DownloadFile(threadIndex int, filePath string, chu } // UploadFile writes 'content' to the file at 'filePath'. -func (storage *WebDAVStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) { +func (storage *WebDAVStorage) UploadFile(threadIndex int, filePath string, content []byte, storageOption StorageOption) (err error) { // If there is an error in creating the parent directory, proceed anyway storage.createParentDirectory(threadIndex, filePath)
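
The `"storageclass"` option added to the `Preference` struct above is read from the repository's `.duplicacy/preferences` file. For reference (not part of the patch), a preference entry using the new option might look like the sketch below; the storage URL, names, and other values are illustrative, the exact set of keys depends on the Duplicacy version, and only the `"storageclass"` key is new in this patch:

```json
[
    {
        "name": "default",
        "id": "my-backups",
        "repository": "",
        "storage": "s3c://fr-par@s3.fr-par.scw.cloud/my-duplicacy-bucket",
        "encrypted": false,
        "no_backup": false,
        "no_restore": false,
        "no_save_password": false,
        "nobackup_file": "",
        "keys": null,
        "filters": "",
        "storageclass": "GLACIER"
    }
]
```

With this in place, `CreateStorage` copies `preference.StorageClass` into `S3CStorage.stOptionData.StorageClass`, so data chunks are uploaded with the GLACIER storage class while snapshot (meta) chunks keep the default Standard class.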
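Glacier.md also notes that restores move chunks out of Glacier one by one, and that moving all chunks to Standard at once is much faster. Below is a minimal standalone sketch (not part of the patch) of such a bulk restore, assuming the `gilbertchen/goamz` fork this patch builds against; its `Bucket.RestoreObject(path, days)` is the same call `S3CStorage.RestoreFile` makes for a single chunk. The region, endpoint, bucket name, and `chunks/` prefix are illustrative and should match your `s3c://` storage URL:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gilbertchen/goamz/aws"
	"github.com/gilbertchen/goamz/s3"
)

func main() {
	// Credentials are read from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY.
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatal(err)
	}

	// Illustrative Scaleway region and endpoint; adjust for your storage.
	region := aws.Region{Name: "fr-par", S3Endpoint: "https://s3.fr-par.scw.cloud"}
	bucket := s3.New(auth, region).Bucket("my-duplicacy-bucket")

	// Page through every object under chunks/ and request a restore to
	// Standard storage with a 1-day retention, mirroring RestoreFile.
	marker := ""
	for {
		resp, err := bucket.List("chunks/", "", marker, 1000)
		if err != nil {
			log.Fatal(err)
		}
		for _, object := range resp.Contents {
			if err := bucket.RestoreObject(object.Key, 1); err != nil {
				// A 409 here typically means a restore is already in progress.
				fmt.Printf("restore %s: %v\n", object.Key, err)
			}
		}
		if !resp.IsTruncated || len(resp.Contents) == 0 {
			break
		}
		marker = resp.Contents[len(resp.Contents)-1].Key
	}
}
```

Once the restores complete, a normal `duplicacy restore` can read the chunks without entering the one-by-one restore-and-wait loop in `ChunkDownloader.Download`.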