summaryrefslogtreecommitdiffstats
path: root/modules/lfs
diff options
context:
space:
mode:
authorwxiaoguang <wxiaoguang@gmail.com>2023-03-28 17:10:24 +0200
committerGitHub <noreply@github.com>2023-03-28 17:10:24 +0200
commit5727056ea109eb04ee535b981349cdfb44df9fae (patch)
treeaf2b96f7b7871dd3d07169642b6234369c032632 /modules/lfs
parentYarden Shoham has a new email address (#23767) (diff)
downloadforgejo-5727056ea109eb04ee535b981349cdfb44df9fae.tar.xz
forgejo-5727056ea109eb04ee535b981349cdfb44df9fae.zip
Make minio package support legacy MD5 checksum (#23768)
A piece of feedback from Discord: https://discord.com/channels/322538954119184384/561007778139734027/1090185427115319386 Some storages, such as: * https://developers.cloudflare.com/r2/api/s3/api/ * https://www.backblaze.com/b2/docs/s3_compatible_api.html do not support the "x-amz-checksum-algorithm" header, but minio recently uses that header with CRC32C by default. So we have to tell minio to use the legacy MD5 checksum. I guess this needs to be backported because IIRC 1.19 and 1.20 are using a similar minio package. The minio package code for SendContentMD5 looks like this: <details> <img width="755" alt="image" src="https://user-images.githubusercontent.com/2114189/228186768-4f2f6f67-62b9-4aee-9251-5af714ad9674.png"> </details>
Diffstat (limited to 'modules/lfs')
-rw-r--r--modules/lfs/content_store.go40
1 file changed, 29 insertions, 11 deletions
diff --git a/modules/lfs/content_store.go b/modules/lfs/content_store.go
index a4ae21bfd6..53fac4ab85 100644
--- a/modules/lfs/content_store.go
+++ b/modules/lfs/content_store.go
@@ -60,15 +60,22 @@ func (s *ContentStore) Put(pointer Pointer, r io.Reader) error {
return err
}
- // This shouldn't happen but it is sensible to test
- if written != pointer.Size {
- if err := s.Delete(p); err != nil {
- log.Error("Cleaning the LFS OID[%s] failed: %v", pointer.Oid, err)
+ // check again whether there is any error during the Save operation
+ // because some errors might be ignored by the Reader's caller
+ if wrappedRd.lastError != nil && !errors.Is(wrappedRd.lastError, io.EOF) {
+ err = wrappedRd.lastError
+ } else if written != pointer.Size {
+ err = ErrSizeMismatch
+ }
+
+ // if the upload failed, try to delete the file
+ if err != nil {
+ if errDel := s.Delete(p); errDel != nil {
+ log.Error("Cleaning the LFS OID[%s] failed: %v", pointer.Oid, errDel)
}
- return ErrSizeMismatch
}
- return nil
+ return err
}
// Exists returns true if the object exists in the content store.
@@ -109,6 +116,17 @@ type hashingReader struct {
expectedSize int64
hash hash.Hash
expectedHash string
+ lastError error
+}
+
+// recordError records the last error during the Save operation
+// Some callers of the Reader doesn't respect the returned "err"
+// For example, MinIO's Put will ignore errors if the written size could equal to expected size
+// So we must remember the error by ourselves,
+// and later check again whether ErrSizeMismatch or ErrHashMismatch occurs during the Save operation
+func (r *hashingReader) recordError(err error) error {
+ r.lastError = err
+ return err
}
func (r *hashingReader) Read(b []byte) (int, error) {
@@ -118,22 +136,22 @@ func (r *hashingReader) Read(b []byte) (int, error) {
r.currentSize += int64(n)
wn, werr := r.hash.Write(b[:n])
if wn != n || werr != nil {
- return n, werr
+ return n, r.recordError(werr)
}
}
- if err != nil && err == io.EOF {
+ if errors.Is(err, io.EOF) || r.currentSize >= r.expectedSize {
if r.currentSize != r.expectedSize {
- return n, ErrSizeMismatch
+ return n, r.recordError(ErrSizeMismatch)
}
shaStr := hex.EncodeToString(r.hash.Sum(nil))
if shaStr != r.expectedHash {
- return n, ErrHashMismatch
+ return n, r.recordError(ErrHashMismatch)
}
}
- return n, err
+ return n, r.recordError(err)
}
func newHashingReader(expectedSize int64, expectedHash string, reader io.Reader) *hashingReader {