Adding upstream version 0.28.1.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
88f1d47ab6
commit
e28c88ef14
933 changed files with 194711 additions and 0 deletions
59
tools/filesystem/internal/s3blob/s3/copy_object.go
Normal file
59
tools/filesystem/internal/s3blob/s3/copy_object.go
Normal file
|
@ -0,0 +1,59 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CopyObjectResponse is the parsed XML payload of a successful CopyObject call.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html#API_CopyObject_ResponseSyntax
type CopyObjectResponse struct {
	CopyObjectResult xml.Name `json:"copyObjectResult" xml:"CopyObjectResult"`

	// ETag is the entity tag of the newly created copy.
	ETag string `json:"etag" xml:"ETag"`

	// LastModified is the creation date of the copy.
	LastModified time.Time `json:"lastModified" xml:"LastModified"`

	// ChecksumType indicates how the below checksum values were computed
	// (e.g. per-object vs composite).
	ChecksumType string `json:"checksumType" xml:"ChecksumType"`

	// The checksum fields hold the base64-encoded object checksum for the
	// corresponding algorithm; only the one(s) requested on upload are set.
	ChecksumCRC32     string `json:"checksumCRC32" xml:"ChecksumCRC32"`
	ChecksumCRC32C    string `json:"checksumCRC32C" xml:"ChecksumCRC32C"`
	ChecksumCRC64NVME string `json:"checksumCRC64NVME" xml:"ChecksumCRC64NVME"`
	ChecksumSHA1      string `json:"checksumSHA1" xml:"ChecksumSHA1"`
	ChecksumSHA256    string `json:"checksumSHA256" xml:"ChecksumSHA256"`
}
|
||||
|
||||
// CopyObject copies a single object from srcKey to dstKey destination.
|
||||
// (both keys are expected to be operating within the same bucket).
|
||||
//
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
|
||||
func (s3 *S3) CopyObject(ctx context.Context, srcKey string, dstKey string, optReqFuncs ...func(*http.Request)) (*CopyObjectResponse, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, s3.URL(dstKey), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// per the doc the header value must be URL-encoded
|
||||
req.Header.Set("x-amz-copy-source", url.PathEscape(s3.Bucket+"/"+strings.TrimLeft(srcKey, "/")))
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optReqFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := s3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
result := &CopyObjectResponse{}
|
||||
|
||||
err = xml.NewDecoder(resp.Body).Decode(result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
67
tools/filesystem/internal/s3blob/s3/copy_object_test.go
Normal file
67
tools/filesystem/internal/s3blob/s3/copy_object_test.go
Normal file
|
@ -0,0 +1,67 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
|
||||
)
|
||||
|
||||
// TestS3CopyObject verifies that CopyObject issues a signed PUT request with
// the URL-encoded x-amz-copy-source header, applies optional request funcs,
// and decodes the XML response into a CopyObjectResponse.
func TestS3CopyObject(t *testing.T) {
	t.Parallel()

	// stub the single expected request (the client fails on unmatched requests)
	httpClient := tests.NewClient(
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/@dst_test",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":       "test",
					"x-amz-copy-source": "test_bucket%2F@src_test", // note the URL-encoded "/"
					"Authorization":     "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<CopyObjectResult>
						<LastModified>2025-01-01T01:02:03.456Z</LastModified>
						<ETag>test_etag</ETag>
					</CopyObjectResult>
				`)),
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	// the optional request func is expected to run before signing/sending
	copyResp, err := s3Client.CopyObject(context.Background(), "@src_test", "@dst_test", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}

	if copyResp.ETag != "test_etag" {
		t.Fatalf("Expected ETag %q, got %q", "test_etag", copyResp.ETag)
	}

	if date := copyResp.LastModified.Format("2006-01-02T15:04:05.000Z"); date != "2025-01-01T01:02:03.456Z" {
		t.Fatalf("Expected LastModified %q, got %q", "2025-01-01T01:02:03.456Z", date)
	}
}
|
31
tools/filesystem/internal/s3blob/s3/delete_object.go
Normal file
31
tools/filesystem/internal/s3blob/s3/delete_object.go
Normal file
|
@ -0,0 +1,31 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// DeleteObject deletes a single object by its key.
|
||||
//
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
|
||||
func (s3 *S3) DeleteObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) error {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, s3.URL(key), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := s3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
48
tools/filesystem/internal/s3blob/s3/delete_object_test.go
Normal file
48
tools/filesystem/internal/s3blob/s3/delete_object_test.go
Normal file
|
@ -0,0 +1,48 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
|
||||
)
|
||||
|
||||
// TestS3DeleteObject verifies that DeleteObject issues a signed DELETE
// request for the given key and applies optional request funcs.
func TestS3DeleteObject(t *testing.T) {
	t.Parallel()

	// stub the single expected DELETE request
	httpClient := tests.NewClient(
		&tests.RequestStub{
			Method: http.MethodDelete,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	err := s3Client.DeleteObject(context.Background(), "test_key", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
|
49
tools/filesystem/internal/s3blob/s3/error.go
Normal file
49
tools/filesystem/internal/s3blob/s3/error.go
Normal file
|
@ -0,0 +1,49 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var _ error = (*ResponseError)(nil)

// ResponseError defines a general S3 response error.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
type ResponseError struct {
	XMLName   xml.Name `json:"-" xml:"Error"`
	Code      string   `json:"code" xml:"Code"`
	Message   string   `json:"message" xml:"Message"`
	RequestId string   `json:"requestId" xml:"RequestId"`
	Resource  string   `json:"resource" xml:"Resource"`
	Raw       []byte   `json:"-" xml:"-"`
	Status    int      `json:"status" xml:"Status"`
}

// Error implements the std error interface.
//
// The produced string has the shape:
//
//	"<status> <code|S3ResponseError>[: <message>][\n(RAW: <raw>)]"
func (err *ResponseError) Error() string {
	var b strings.Builder

	b.WriteString(strconv.Itoa(err.Status))
	b.WriteByte(' ')

	// fall back to a generic code when the response didn't provide one
	code := err.Code
	if code == "" {
		code = "S3ResponseError"
	}
	b.WriteString(code)

	if err.Message != "" {
		b.WriteString(": ")
		b.WriteString(err.Message)
	}

	// include the raw response body, if available, to ease debugging
	if len(err.Raw) > 0 {
		b.WriteString("\n(RAW: ")
		b.Write(err.Raw)
		b.WriteByte(')')
	}

	return b.String()
}
|
86
tools/filesystem/internal/s3blob/s3/error_test.go
Normal file
86
tools/filesystem/internal/s3blob/s3/error_test.go
Normal file
|
@ -0,0 +1,86 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"testing"
|
||||
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
|
||||
)
|
||||
|
||||
// TestResponseErrorSerialization checks that a ResponseError can be populated
// from an S3 XML error payload and that its JSON form omits the XMLName and
// Raw fields (both are tagged with json:"-").
func TestResponseErrorSerialization(t *testing.T) {
	raw := `
		<?xml version="1.0" encoding="UTF-8"?>
		<Error>
			<Code>test_code</Code>
			<Message>test_message</Message>
			<RequestId>test_request_id</RequestId>
			<Resource>test_resource</Resource>
		</Error>
	`

	// preset fields that aren't part of the XML payload to ensure
	// they survive the unmarshal and are (not) serialized as expected
	respErr := &s3.ResponseError{
		Status: 123,
		Raw:    []byte("test"),
	}

	err := xml.Unmarshal([]byte(raw), &respErr)
	if err != nil {
		t.Fatal(err)
	}

	jsonRaw, err := json.Marshal(respErr)
	if err != nil {
		t.Fatal(err)
	}
	jsonStr := string(jsonRaw)

	expected := `{"code":"test_code","message":"test_message","requestId":"test_request_id","resource":"test_resource","status":123}`

	if expected != jsonStr {
		t.Fatalf("Expected JSON\n%s\ngot\n%s", expected, jsonStr)
	}
}
|
||||
|
||||
// TestResponseErrorErrorInterface table-tests the ResponseError.Error()
// formatting: the generic code fallback, code+message, and raw body suffix.
func TestResponseErrorErrorInterface(t *testing.T) {
	scenarios := []struct {
		name     string
		err      *s3.ResponseError
		expected string
	}{
		{
			"empty",
			&s3.ResponseError{},
			"0 S3ResponseError",
		},
		{
			"with code and message (nil raw)",
			&s3.ResponseError{
				Status:  123,
				Code:    "test_code",
				Message: "test_message",
			},
			"123 test_code: test_message",
		},
		{
			"with code and message (non-nil raw)",
			&s3.ResponseError{
				Status:  123,
				Code:    "test_code",
				Message: "test_message",
				Raw:     []byte("test_raw"),
			},
			"123 test_code: test_message\n(RAW: test_raw)",
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			result := s.err.Error()

			if result != s.expected {
				t.Fatalf("Expected\n%s\ngot\n%s", s.expected, result)
			}
		})
	}
}
|
43
tools/filesystem/internal/s3blob/s3/get_object.go
Normal file
43
tools/filesystem/internal/s3blob/s3/get_object.go
Normal file
|
@ -0,0 +1,43 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// GetObjectResponse combines the object's content stream with its
// parsed metadata headers.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_ResponseElements
type GetObjectResponse struct {
	// Body is the object's content stream.
	// The caller is responsible for closing it (see GetObject).
	Body io.ReadCloser `json:"-" xml:"-"`

	// The embedded HeadObjectResponse carries the header-derived
	// attributes (ETag, ContentLength, Metadata, etc.).
	HeadObjectResponse
}
|
||||
|
||||
// GetObject retrieves a single object by its key.
|
||||
//
|
||||
// NB! Make sure to call GetObjectResponse.Body.Close() after done working with the result.
|
||||
//
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
|
||||
func (s3 *S3) GetObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) (*GetObjectResponse, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, s3.URL(key), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := s3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := &GetObjectResponse{Body: resp.Body}
|
||||
result.load(resp.Header)
|
||||
|
||||
return result, nil
|
||||
}
|
92
tools/filesystem/internal/s3blob/s3/get_object_test.go
Normal file
92
tools/filesystem/internal/s3blob/s3/get_object_test.go
Normal file
|
@ -0,0 +1,92 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
|
||||
)
|
||||
|
||||
// TestS3GetObject verifies that GetObject issues a signed GET request,
// returns the raw body stream, and maps the response headers (including
// lower-cased "x-amz-meta-*" metadata) onto the result attributes.
func TestS3GetObject(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(
		&tests.RequestStub{
			Method: http.MethodGet,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				// every header that HeadObjectResponse.load understands
				Header: http.Header{
					"Last-Modified":       []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
					"Cache-Control":       []string{"test_cache"},
					"Content-Disposition": []string{"test_disposition"},
					"Content-Encoding":    []string{"test_encoding"},
					"Content-Language":    []string{"test_language"},
					"Content-Type":        []string{"test_type"},
					"Content-Range":       []string{"test_range"},
					"Etag":                []string{"test_etag"},
					"Content-Length":      []string{"100"},
					"x-amz-meta-AbC":      []string{"test_meta_a"},
					"x-amz-meta-Def":      []string{"test_meta_b"},
				},
				Body: io.NopCloser(strings.NewReader("test")),
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	resp, err := s3Client.GetObject(context.Background(), "test_key", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}

	// check body
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	bodyStr := string(body)

	if bodyStr != "test" {
		t.Fatalf("Expected body\n%q\ngot\n%q", "test", bodyStr)
	}

	// check serialized attributes
	raw, err := json.Marshal(resp)
	if err != nil {
		t.Fatal(err)
	}
	rawStr := string(raw)

	expected := `{"metadata":{"abc":"test_meta_a","def":"test_meta_b"},"lastModified":"2025-02-01T03:04:05Z","cacheControl":"test_cache","contentDisposition":"test_disposition","contentEncoding":"test_encoding","contentLanguage":"test_language","contentType":"test_type","contentRange":"test_range","etag":"test_etag","contentLength":100}`

	if rawStr != expected {
		t.Fatalf("Expected attributes\n%s\ngot\n%s", expected, rawStr)
	}
}
|
89
tools/filesystem/internal/s3blob/s3/head_object.go
Normal file
89
tools/filesystem/internal/s3blob/s3/head_object.go
Normal file
|
@ -0,0 +1,89 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseElements
|
||||
type HeadObjectResponse struct {
|
||||
// Metadata is the extra data that is stored with the S3 object (aka. the "x-amz-meta-*" header values).
|
||||
//
|
||||
// The map keys are normalized to lower-case.
|
||||
Metadata map[string]string `json:"metadata"`
|
||||
|
||||
// LastModified date and time when the object was last modified.
|
||||
LastModified time.Time `json:"lastModified"`
|
||||
|
||||
// CacheControl specifies caching behavior along the request/reply chain.
|
||||
CacheControl string `json:"cacheControl"`
|
||||
|
||||
// ContentDisposition specifies presentational information for the object.
|
||||
ContentDisposition string `json:"contentDisposition"`
|
||||
|
||||
// ContentEncoding indicates what content encodings have been applied to the object
|
||||
// and thus what decoding mechanisms must be applied to obtain the
|
||||
// media-type referenced by the Content-Type header field.
|
||||
ContentEncoding string `json:"contentEncoding"`
|
||||
|
||||
// ContentLanguage indicates the language the content is in.
|
||||
ContentLanguage string `json:"contentLanguage"`
|
||||
|
||||
// ContentType is a standard MIME type describing the format of the object data.
|
||||
ContentType string `json:"contentType"`
|
||||
|
||||
// ContentRange is the portion of the object usually returned in the response for a GET request.
|
||||
ContentRange string `json:"contentRange"`
|
||||
|
||||
// ETag is an opaque identifier assigned by a web
|
||||
// server to a specific version of a resource found at a URL.
|
||||
ETag string `json:"etag"`
|
||||
|
||||
// ContentLength is size of the body in bytes.
|
||||
ContentLength int64 `json:"contentLength"`
|
||||
}
|
||||
|
||||
// load parses and load the header values into the current HeadObjectResponse fields.
|
||||
func (o *HeadObjectResponse) load(headers http.Header) {
|
||||
o.LastModified, _ = time.Parse(time.RFC1123, headers.Get("Last-Modified"))
|
||||
o.CacheControl = headers.Get("Cache-Control")
|
||||
o.ContentDisposition = headers.Get("Content-Disposition")
|
||||
o.ContentEncoding = headers.Get("Content-Encoding")
|
||||
o.ContentLanguage = headers.Get("Content-Language")
|
||||
o.ContentType = headers.Get("Content-Type")
|
||||
o.ContentRange = headers.Get("Content-Range")
|
||||
o.ETag = headers.Get("ETag")
|
||||
o.ContentLength, _ = strconv.ParseInt(headers.Get("Content-Length"), 10, 0)
|
||||
o.Metadata = extractMetadata(headers)
|
||||
}
|
||||
|
||||
// HeadObject sends a HEAD request for a single object to check its
|
||||
// existence and to retrieve its metadata.
|
||||
//
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html
|
||||
func (s3 *S3) HeadObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) (*HeadObjectResponse, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodHead, s3.URL(key), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := s3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
result := &HeadObjectResponse{}
|
||||
result.load(resp.Header)
|
||||
|
||||
return result, nil
|
||||
}
|
77
tools/filesystem/internal/s3blob/s3/head_object_test.go
Normal file
77
tools/filesystem/internal/s3blob/s3/head_object_test.go
Normal file
|
@ -0,0 +1,77 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
|
||||
)
|
||||
|
||||
// TestS3HeadObject verifies that HeadObject issues a signed HEAD request and
// maps the response headers (including lower-cased "x-amz-meta-*" metadata)
// onto the HeadObjectResponse attributes.
func TestS3HeadObject(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(
		&tests.RequestStub{
			Method: http.MethodHead,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				// every header that HeadObjectResponse.load understands
				Header: http.Header{
					"Last-Modified":       []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
					"Cache-Control":       []string{"test_cache"},
					"Content-Disposition": []string{"test_disposition"},
					"Content-Encoding":    []string{"test_encoding"},
					"Content-Language":    []string{"test_language"},
					"Content-Type":        []string{"test_type"},
					"Content-Range":       []string{"test_range"},
					"Etag":                []string{"test_etag"},
					"Content-Length":      []string{"100"},
					"x-amz-meta-AbC":      []string{"test_meta_a"},
					"x-amz-meta-Def":      []string{"test_meta_b"},
				},
				Body: http.NoBody,
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	resp, err := s3Client.HeadObject(context.Background(), "test_key", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}

	raw, err := json.Marshal(resp)
	if err != nil {
		t.Fatal(err)
	}
	rawStr := string(raw)

	expected := `{"metadata":{"abc":"test_meta_a","def":"test_meta_b"},"lastModified":"2025-02-01T03:04:05Z","cacheControl":"test_cache","contentDisposition":"test_disposition","contentEncoding":"test_encoding","contentLanguage":"test_language","contentType":"test_type","contentRange":"test_range","etag":"test_etag","contentLength":100}`

	if rawStr != expected {
		t.Fatalf("Expected response\n%s\ngot\n%s", expected, rawStr)
	}
}
|
165
tools/filesystem/internal/s3blob/s3/list_objects.go
Normal file
165
tools/filesystem/internal/s3blob/s3/list_objects.go
Normal file
|
@ -0,0 +1,165 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ListParams defines optional parameters for the ListObject request.
type ListParams struct {
	// ContinuationToken indicates that the list is being continued on this bucket with a token.
	// ContinuationToken is obfuscated and is not a real key.
	// You can use this ContinuationToken for pagination of the list results.
	ContinuationToken string `json:"continuationToken"`

	// Delimiter is a character that you use to group keys.
	//
	// For directory buckets, "/" is the only supported delimiter.
	Delimiter string `json:"delimiter"`

	// Prefix limits the response to keys that begin with the specified prefix.
	Prefix string `json:"prefix"`

	// Encoding type is used to encode the object keys in the response.
	// Responses are encoded only in UTF-8.
	// An object key can contain any Unicode character.
	// However, the XML 1.0 parser can't parse certain characters,
	// such as characters with an ASCII value from 0 to 10.
	// For characters that aren't supported in XML 1.0, you can add
	// this parameter to request that S3 encode the keys in the response.
	//
	// Valid Values: url
	EncodingType string `json:"encodingType"`

	// StartAfter is where you want S3 to start listing from.
	// S3 starts listing after this specified key.
	// StartAfter can be any key in the bucket.
	//
	// This functionality is not supported for directory buckets.
	StartAfter string `json:"startAfter"`

	// MaxKeys Sets the maximum number of keys returned in the response.
	// By default, the action returns up to 1,000 key names.
	// The response might contain fewer keys but will never contain more.
	MaxKeys int `json:"maxKeys"`

	// FetchOwner returns the owner field with each key in the result.
	FetchOwner bool `json:"fetchOwner"`
}

// Encode encodes the parameters in a properly formatted query string.
//
// Zero-valued parameters are omitted; the mandatory "list-type=2"
// marker is always present.
func (l *ListParams) Encode() string {
	query := url.Values{}
	query.Set("list-type", "2")

	// plain string parameters are included only when non-empty
	stringParams := []struct {
		name  string
		value string
	}{
		{"continuation-token", l.ContinuationToken},
		{"delimiter", l.Delimiter},
		{"prefix", l.Prefix},
		{"encoding-type", l.EncodingType},
		{"start-after", l.StartAfter},
	}
	for _, p := range stringParams {
		if p.value != "" {
			query.Set(p.name, p.value)
		}
	}

	if l.FetchOwner {
		query.Set("fetch-owner", "true")
	}

	if l.MaxKeys > 0 {
		query.Set("max-keys", strconv.Itoa(l.MaxKeys))
	}

	// url.Values.Encode sorts the keys, producing a stable output
	return query.Encode()
}
|
||||
|
||||
// ListObjects retrieves paginated objects list.
|
||||
//
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
|
||||
func (s3 *S3) ListObjects(ctx context.Context, params ListParams, optReqFuncs ...func(*http.Request)) (*ListObjectsResponse, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, s3.URL("?"+params.Encode()), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optReqFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := s3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
result := &ListObjectsResponse{}
|
||||
|
||||
err = xml.NewDecoder(resp.Body).Decode(result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ListObjectsResponse is the parsed XML payload of a ListObjectsV2 call.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_ResponseSyntax
type ListObjectsResponse struct {
	XMLName               xml.Name `json:"-" xml:"ListBucketResult"`
	EncodingType          string   `json:"encodingType" xml:"EncodingType"`
	Name                  string   `json:"name" xml:"Name"`
	Prefix                string   `json:"prefix" xml:"Prefix"`
	Delimiter             string   `json:"delimiter" xml:"Delimiter"`
	ContinuationToken     string   `json:"continuationToken" xml:"ContinuationToken"`
	NextContinuationToken string   `json:"nextContinuationToken" xml:"NextContinuationToken"`
	StartAfter            string   `json:"startAfter" xml:"StartAfter"`

	// CommonPrefixes holds the keys grouped by the request delimiter
	// (i.e. the "subdirectories" when the delimiter is "/").
	CommonPrefixes []*ListObjectCommonPrefix `json:"commonPrefixes" xml:"CommonPrefixes"`

	// Contents holds one entry per listed object.
	Contents []*ListObjectContent `json:"contents" xml:"Contents"`

	KeyCount    int  `json:"keyCount" xml:"KeyCount"`
	MaxKeys     int  `json:"maxKeys" xml:"MaxKeys"`
	IsTruncated bool `json:"isTruncated" xml:"IsTruncated"`
}
|
||||
|
||||
// ListObjectCommonPrefix is a single <CommonPrefixes> entry of a
// ListObjectsV2 response (a group of keys rolled up by the delimiter).
type ListObjectCommonPrefix struct {
	Prefix string `json:"prefix" xml:"Prefix"`
}
|
||||
|
||||
// ListObjectContent is a single <Contents> entry of a ListObjectsV2
// response, describing one listed object.
type ListObjectContent struct {
	// Owner is populated only when the request sets fetch-owner.
	Owner struct {
		DisplayName string `json:"displayName" xml:"DisplayName"`
		ID          string `json:"id" xml:"ID"`
	} `json:"owner" xml:"Owner"`

	ChecksumAlgorithm string    `json:"checksumAlgorithm" xml:"ChecksumAlgorithm"`
	ETag              string    `json:"etag" xml:"ETag"`
	Key               string    `json:"key" xml:"Key"`
	StorageClass      string    `json:"storageClass" xml:"StorageClass"`
	LastModified      time.Time `json:"lastModified" xml:"LastModified"`

	// RestoreStatus describes an archived object's restoration state, if any.
	RestoreStatus struct {
		RestoreExpiryDate   time.Time `json:"restoreExpiryDate" xml:"RestoreExpiryDate"`
		IsRestoreInProgress bool      `json:"isRestoreInProgress" xml:"IsRestoreInProgress"`
	} `json:"restoreStatus" xml:"RestoreStatus"`

	// Size is the object size in bytes.
	Size int64 `json:"size" xml:"Size"`
}
|
157
tools/filesystem/internal/s3blob/s3/list_objects_test.go
Normal file
157
tools/filesystem/internal/s3blob/s3/list_objects_test.go
Normal file
|
@ -0,0 +1,157 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
|
||||
)
|
||||
|
||||
// TestS3ListParamsEncode table-tests ListParams.Encode: the mandatory
// "list-type=2" marker and the omission of zero-valued parameters.
func TestS3ListParamsEncode(t *testing.T) {
	t.Parallel()

	scenarios := []struct {
		name     string
		params   s3.ListParams
		expected string
	}{
		{
			"blank",
			s3.ListParams{},
			"list-type=2",
		},
		{
			"filled",
			s3.ListParams{
				ContinuationToken: "test_ct",
				Delimiter:         "test_delimiter",
				Prefix:            "test_prefix",
				EncodingType:      "test_et",
				StartAfter:        "test_sa",
				MaxKeys:           1,
				FetchOwner:        true,
			},
			// url.Values.Encode output is sorted by key
			"continuation-token=test_ct&delimiter=test_delimiter&encoding-type=test_et&fetch-owner=true&list-type=2&max-keys=1&prefix=test_prefix&start-after=test_sa",
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			result := s.params.Encode()
			if result != s.expected {
				t.Fatalf("Expected\n%s\ngot\n%s", s.expected, result)
			}
		})
	}
}
|
||||
|
||||
// TestS3ListObjects verifies that ListObjects issues a signed GET request with
// the encoded list parameters in the query string and fully decodes the
// ListBucketResult XML (contents, common prefixes, owner and restore status).
func TestS3ListObjects(t *testing.T) {
	t.Parallel()

	listParams := s3.ListParams{
		ContinuationToken: "test_ct",
		Delimiter:         "test_delimiter",
		Prefix:            "test_prefix",
		EncodingType:      "test_et",
		StartAfter:        "test_sa",
		MaxKeys:           10,
		FetchOwner:        true,
	}

	httpClient := tests.NewClient(
		&tests.RequestStub{
			Method: http.MethodGet,
			// the request URL is expected to embed the encoded params
			URL: "http://test_bucket.example.com/?" + listParams.Encode(),
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<?xml version="1.0" encoding="UTF-8"?>
					<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
						<Name>example</Name>
						<EncodingType>test_encoding</EncodingType>
						<Prefix>a/</Prefix>
						<Delimiter>/</Delimiter>
						<ContinuationToken>ct</ContinuationToken>
						<NextContinuationToken>nct</NextContinuationToken>
						<StartAfter>example0.txt</StartAfter>
						<KeyCount>1</KeyCount>
						<MaxKeys>3</MaxKeys>
						<IsTruncated>true</IsTruncated>
						<Contents>
							<Key>example1.txt</Key>
							<LastModified>2025-01-01T01:02:03.123Z</LastModified>
							<ChecksumAlgorithm>test_ca</ChecksumAlgorithm>
							<ETag>test_etag1</ETag>
							<Size>123</Size>
							<StorageClass>STANDARD</StorageClass>
							<Owner>
								<DisplayName>owner_dn</DisplayName>
								<ID>owner_id</ID>
							</Owner>
							<RestoreStatus>
								<RestoreExpiryDate>2025-01-02T01:02:03.123Z</RestoreExpiryDate>
								<IsRestoreInProgress>true</IsRestoreInProgress>
							</RestoreStatus>
						</Contents>
						<Contents>
							<Key>example2.txt</Key>
							<LastModified>2025-01-02T01:02:03.123Z</LastModified>
							<ETag>test_etag2</ETag>
							<Size>456</Size>
							<StorageClass>STANDARD</StorageClass>
						</Contents>
						<CommonPrefixes>
							<Prefix>a/b/</Prefix>
						</CommonPrefixes>
						<CommonPrefixes>
							<Prefix>a/c/</Prefix>
						</CommonPrefixes>
					</ListBucketResult>
				`)),
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	resp, err := s3Client.ListObjects(context.Background(), listParams, func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}

	// the serialized JSON form covers every decoded field at once
	raw, err := json.Marshal(resp)
	if err != nil {
		t.Fatal(err)
	}
	rawStr := string(raw)

	expected := `{"encodingType":"test_encoding","name":"example","prefix":"a/","delimiter":"/","continuationToken":"ct","nextContinuationToken":"nct","startAfter":"example0.txt","commonPrefixes":[{"prefix":"a/b/"},{"prefix":"a/c/"}],"contents":[{"owner":{"displayName":"owner_dn","id":"owner_id"},"checksumAlgorithm":"test_ca","etag":"test_etag1","key":"example1.txt","storageClass":"STANDARD","lastModified":"2025-01-01T01:02:03.123Z","restoreStatus":{"restoreExpiryDate":"2025-01-02T01:02:03.123Z","isRestoreInProgress":true},"size":123},{"owner":{"displayName":"","id":""},"checksumAlgorithm":"","etag":"test_etag2","key":"example2.txt","storageClass":"STANDARD","lastModified":"2025-01-02T01:02:03.123Z","restoreStatus":{"restoreExpiryDate":"0001-01-01T00:00:00Z","isRestoreInProgress":false},"size":456}],"keyCount":1,"maxKeys":3,"isTruncated":true}`

	if rawStr != expected {
		t.Fatalf("Expected response\n%s\ngot\n%s", expected, rawStr)
	}
}
|
370
tools/filesystem/internal/s3blob/s3/s3.go
Normal file
370
tools/filesystem/internal/s3blob/s3/s3.go
Normal file
|
@ -0,0 +1,370 @@
|
|||
// Package s3 implements a lightweight client for interacting with the
|
||||
// REST APIs of any S3 compatible service.
|
||||
//
|
||||
// It implements only the minimal functionality required by PocketBase
|
||||
// such as objects list, get, copy, delete and upload.
|
||||
//
|
||||
// For more details why we don't use the official aws-sdk-go-v2, you could check
|
||||
// https://github.com/pocketbase/pocketbase/discussions/6562.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// client := &s3.S3{
|
||||
// Endpoint: "example.com",
|
||||
// Region: "us-east-1",
|
||||
// Bucket: "test",
|
||||
// AccessKey: "...",
|
||||
// SecretKey: "...",
|
||||
// UsePathStyle: true,
|
||||
// }
|
||||
// resp, err := client.GetObject(context.Background(), "abc.txt")
|
||||
package s3
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// awsS3ServiceCode is the service code used in the SigV4 credential scope.
	awsS3ServiceCode = "s3"

	// awsSignAlgorithm is the signing algorithm identifier placed in the
	// Authorization header and the string-to-sign.
	awsSignAlgorithm = "AWS4-HMAC-SHA256"

	// awsTerminationString terminates the SigV4 credential scope.
	awsTerminationString = "aws4_request"

	// metadataPrefix is the header prefix for user-defined object metadata.
	metadataPrefix = "x-amz-meta-"

	// dateTimeFormat is the ISO 8601 basic format layout used for the
	// x-amz-date header (reference time: 2006-01-02 15:04:05).
	dateTimeFormat = "20060102T150405Z"
)
|
||||
|
||||
// HTTPClient is the minimal HTTP client interface required for sending
// the signed S3 requests (satisfied by *http.Client).
type HTTPClient interface {
	Do(req *http.Request) (*http.Response, error)
}

// S3 holds the configuration of a single S3-compatible bucket client.
type S3 struct {
	// Client specifies a custom HTTP client to send the request with.
	//
	// If not explicitly set, fallbacks to http.DefaultClient.
	Client HTTPClient

	Bucket       string
	Region       string
	Endpoint     string // can be with or without the schema
	AccessKey    string
	SecretKey    string
	UsePathStyle bool
}

// URL constructs an S3 request URL based on the current configuration.
//
// The Endpoint schema (http/https) is honored when present, otherwise
// https is assumed. With UsePathStyle the bucket becomes the first path
// segment, otherwise it is used as a virtual-host subdomain.
func (s3 *S3) URL(path string) string {
	scheme := "https"

	endpoint := strings.TrimRight(s3.Endpoint, "/")
	switch {
	case strings.HasPrefix(endpoint, "https://"):
		endpoint = endpoint[len("https://"):]
	case strings.HasPrefix(endpoint, "http://"):
		scheme = "http"
		endpoint = endpoint[len("http://"):]
	}

	key := strings.TrimLeft(path, "/")

	if s3.UsePathStyle {
		return scheme + "://" + endpoint + "/" + s3.Bucket + "/" + key
	}

	return scheme + "://" + s3.Bucket + "." + endpoint + "/" + key
}
|
||||
|
||||
// SignAndSend signs the provided request per AWS Signature v4 and sends it.
|
||||
//
|
||||
// It automatically normalizes all 40x/50x responses to ResponseError.
|
||||
//
|
||||
// Note: Don't forget to call resp.Body.Close() after done with the result.
|
||||
func (s3 *S3) SignAndSend(req *http.Request) (*http.Response, error) {
|
||||
s3.sign(req)
|
||||
|
||||
client := s3.Client
|
||||
if client == nil {
|
||||
client = http.DefaultClient
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
defer resp.Body.Close()
|
||||
|
||||
respErr := &ResponseError{
|
||||
Status: resp.StatusCode,
|
||||
}
|
||||
|
||||
respErr.Raw, err = io.ReadAll(resp.Body)
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, errors.Join(err, respErr)
|
||||
}
|
||||
|
||||
if len(respErr.Raw) > 0 {
|
||||
err = xml.Unmarshal(respErr.Raw, respErr)
|
||||
if err != nil {
|
||||
return nil, errors.Join(err, respErr)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, respErr
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// sign signs req in-place following the AWS Signature Version 4 process
// (canonical request -> string-to-sign -> derived key -> Authorization header).
//
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-signed-request-steps
func (s3 *S3) sign(req *http.Request) {
	// fallback to the Unsigned payload option
	// (data integrity checks could be still applied via the content-md5 or x-amz-checksum-* headers)
	if req.Header.Get("x-amz-content-sha256") == "" {
		req.Header.Set("x-amz-content-sha256", "UNSIGNED-PAYLOAD")
	}

	// reuse a caller-provided x-amz-date (useful for tests), otherwise use "now"
	reqDateTime, _ := time.Parse(dateTimeFormat, req.Header.Get("x-amz-date"))
	if reqDateTime.IsZero() {
		reqDateTime = time.Now().UTC()
		req.Header.Set("x-amz-date", reqDateTime.Format(dateTimeFormat))
	}

	// the host header must be part of the signed headers
	req.Header.Set("host", req.URL.Host)

	date := reqDateTime.Format("20060102")

	dateTime := reqDateTime.Format(dateTimeFormat)

	// 1. Create canonical request
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-canonical-request
	// ---------------------------------------------------------------
	canonicalHeaders, signedHeaders := canonicalAndSignedHeaders(req)

	canonicalParts := []string{
		req.Method,
		escapePath(req.URL.Path),
		escapeQuery(req.URL.Query()),
		canonicalHeaders,
		signedHeaders,
		req.Header.Get("x-amz-content-sha256"),
	}

	// 2. Create a hash of the canonical request
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-canonical-request-hash
	// ---------------------------------------------------------------
	hashedCanonicalRequest := sha256Hex([]byte(strings.Join(canonicalParts, "\n")))

	// 3. Create a string to sign
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-string-to-sign
	// ---------------------------------------------------------------
	scope := strings.Join([]string{
		date,
		s3.Region,
		awsS3ServiceCode,
		awsTerminationString,
	}, "/")

	stringToSign := strings.Join([]string{
		awsSignAlgorithm,
		dateTime,
		scope,
		hashedCanonicalRequest,
	}, "\n")

	// 4. Derive a signing key for SigV4
	// (each HMAC feeds the next: secret -> date -> region -> service -> termination)
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#derive-signing-key
	// ---------------------------------------------------------------
	dateKey := hmacSHA256([]byte("AWS4"+s3.SecretKey), date)
	dateRegionKey := hmacSHA256(dateKey, s3.Region)
	dateRegionServiceKey := hmacSHA256(dateRegionKey, awsS3ServiceCode)
	signingKey := hmacSHA256(dateRegionServiceKey, awsTerminationString)
	signature := hex.EncodeToString(hmacSHA256(signingKey, stringToSign))

	// 5. Add the signature to the request
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#add-signature-to-request
	authorization := fmt.Sprintf(
		"%s Credential=%s/%s, SignedHeaders=%s, Signature=%s",
		awsSignAlgorithm,
		s3.AccessKey,
		scope,
		signedHeaders,
		signature,
	)

	req.Header.Set("authorization", authorization)
}
|
||||
|
||||
// sha256Hex returns the lowercase hex-encoded SHA-256 digest of content.
func sha256Hex(content []byte) string {
	// sha256.Sum256 computes the digest in one call, avoiding the
	// intermediate hash.Hash allocation of sha256.New()+Write+Sum.
	sum := sha256.Sum256(content)
	return hex.EncodeToString(sum[:])
}
|
||||
|
||||
func hmacSHA256(key []byte, content string) []byte {
|
||||
mac := hmac.New(sha256.New, key)
|
||||
mac.Write([]byte(content))
|
||||
return mac.Sum(nil)
|
||||
}
|
||||
|
||||
func canonicalAndSignedHeaders(req *http.Request) (string, string) {
|
||||
signed := []string{}
|
||||
canonical := map[string]string{}
|
||||
|
||||
for key, values := range req.Header {
|
||||
normalizedKey := strings.ToLower(key)
|
||||
|
||||
if normalizedKey != "host" &&
|
||||
normalizedKey != "content-type" &&
|
||||
!strings.HasPrefix(normalizedKey, "x-amz-") {
|
||||
continue
|
||||
}
|
||||
|
||||
signed = append(signed, normalizedKey)
|
||||
|
||||
// for each value:
|
||||
// trim any leading or trailing spaces
|
||||
// convert sequential spaces to a single space
|
||||
normalizedValues := make([]string, len(values))
|
||||
for i, v := range values {
|
||||
normalizedValues[i] = strings.ReplaceAll(strings.TrimSpace(v), " ", " ")
|
||||
}
|
||||
|
||||
canonical[normalizedKey] = strings.Join(normalizedValues, ",")
|
||||
}
|
||||
|
||||
slices.Sort(signed)
|
||||
|
||||
var sortedCanonical strings.Builder
|
||||
for _, key := range signed {
|
||||
sortedCanonical.WriteString(key)
|
||||
sortedCanonical.WriteString(":")
|
||||
sortedCanonical.WriteString(canonical[key])
|
||||
sortedCanonical.WriteString("\n")
|
||||
}
|
||||
|
||||
return sortedCanonical.String(), strings.Join(signed, ";")
|
||||
}
|
||||
|
||||
// extractMetadata parses and extracts and the metadata from the specified request headers.
|
||||
//
|
||||
// The metadata keys are all lowercased and without the "x-amz-meta-" prefix.
|
||||
func extractMetadata(headers http.Header) map[string]string {
|
||||
result := map[string]string{}
|
||||
|
||||
for k, v := range headers {
|
||||
if len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
metadataKey, ok := strings.CutPrefix(strings.ToLower(k), metadataPrefix)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
result[metadataKey] = v[0]
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// escapeQuery returns the URI encoded request query parameters according to the AWS S3 spec requirements
|
||||
// (it is similar to url.Values.Encode but instead of url.QueryEscape uses our own escape method).
|
||||
func escapeQuery(values url.Values) string {
|
||||
if len(values) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
var buf strings.Builder
|
||||
|
||||
keys := make([]string, 0, len(values))
|
||||
for k := range values {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
slices.Sort(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
vs := values[k]
|
||||
keyEscaped := escape(k)
|
||||
for _, values := range vs {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteByte('&')
|
||||
}
|
||||
buf.WriteString(keyEscaped)
|
||||
buf.WriteByte('=')
|
||||
buf.WriteString(escape(values))
|
||||
}
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// escapePath returns the URI encoded request path according to the AWS S3 spec requirements.
|
||||
func escapePath(path string) string {
|
||||
parts := strings.Split(path, "/")
|
||||
|
||||
for i, part := range parts {
|
||||
parts[i] = escape(part)
|
||||
}
|
||||
|
||||
return strings.Join(parts, "/")
|
||||
}
|
||||
|
||||
const upperhex = "0123456789ABCDEF"
|
||||
|
||||
// escape is similar to the std url.escape but implements the AWS [UriEncode requirements]:
|
||||
// - URI encode every byte except the unreserved characters: 'A'-'Z', 'a'-'z', '0'-'9', '-', '.', '_', and '~'.
|
||||
// - The space character is a reserved character and must be encoded as "%20" (and not as "+").
|
||||
// - Each URI encoded byte is formed by a '%' and the two-digit hexadecimal value of the byte.
|
||||
// - Letters in the hexadecimal value must be uppercase, for example "%1A".
|
||||
//
|
||||
// [UriEncode requirements]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html
|
||||
func escape(s string) string {
|
||||
hexCount := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if shouldEscape(c) {
|
||||
hexCount++
|
||||
}
|
||||
}
|
||||
|
||||
if hexCount == 0 {
|
||||
return s
|
||||
}
|
||||
|
||||
result := make([]byte, len(s)+2*hexCount)
|
||||
|
||||
j := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if shouldEscape(c) {
|
||||
result[j] = '%'
|
||||
result[j+1] = upperhex[c>>4]
|
||||
result[j+2] = upperhex[c&15]
|
||||
j += 3
|
||||
} else {
|
||||
result[j] = c
|
||||
j++
|
||||
}
|
||||
}
|
||||
|
||||
return string(result)
|
||||
}
|
||||
|
||||
// > "URI encode every byte except the unreserved characters: 'A'-'Z', 'a'-'z', '0'-'9', '-', '.', '_', and '~'."
|
||||
func shouldEscape(c byte) bool {
|
||||
isUnreserved := (c >= 'A' && c <= 'Z') ||
|
||||
(c >= 'a' && c <= 'z') ||
|
||||
(c >= '0' && c <= '9') ||
|
||||
c == '-' || c == '.' || c == '_' || c == '~'
|
||||
|
||||
return !isUnreserved
|
||||
}
|
35
tools/filesystem/internal/s3blob/s3/s3_escape_test.go
Normal file
35
tools/filesystem/internal/s3blob/s3/s3_escape_test.go
Normal file
|
@ -0,0 +1,35 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestEscapePath verifies the AWS UriEncode rules for paths: unreserved
// characters pass through, everything else becomes uppercase %XX, and the
// "/" segment separators are preserved.
func TestEscapePath(t *testing.T) {
	t.Parallel()

	escaped := escapePath("/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~ !@#$%^&*()+={}[]?><\\|,`'\"/@sub1/@sub2/a/b/c/1/2/3")

	expected := "/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~%20%21%40%23%24%25%5E%26%2A%28%29%2B%3D%7B%7D%5B%5D%3F%3E%3C%5C%7C%2C%60%27%22/%40sub1/%40sub2/a/b/c/1/2/3"

	if escaped != expected {
		t.Fatalf("Expected\n%s\ngot\n%s", expected, escaped)
	}
}
|
||||
|
||||
// TestEscapeQuery verifies that query parameters are sorted by key and that
// both keys and values are encoded with the AWS UriEncode rules (space as
// %20, "/" escaped, uppercase hex).
func TestEscapeQuery(t *testing.T) {
	t.Parallel()

	escaped := escapeQuery(url.Values{
		"abc": []string{"123"},
		"/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~ !@#$%^&*()+={}[]?><\\|,`'\"": []string{
			"/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~ !@#$%^&*()+={}[]?><\\|,`'\"",
		},
	})

	expected := "%2FABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~%20%21%40%23%24%25%5E%26%2A%28%29%2B%3D%7B%7D%5B%5D%3F%3E%3C%5C%7C%2C%60%27%22=%2FABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~%20%21%40%23%24%25%5E%26%2A%28%29%2B%3D%7B%7D%5B%5D%3F%3E%3C%5C%7C%2C%60%27%22&abc=123"

	if escaped != expected {
		t.Fatalf("Expected\n%s\ngot\n%s", expected, escaped)
	}
}
|
256
tools/filesystem/internal/s3blob/s3/s3_test.go
Normal file
256
tools/filesystem/internal/s3blob/s3/s3_test.go
Normal file
|
@ -0,0 +1,256 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
|
||||
)
|
||||
|
||||
// TestS3URL covers the URL construction variants: virtual-host vs path-style
// addressing, and endpoint values with/without an explicit schema.
func TestS3URL(t *testing.T) {
	t.Parallel()

	scenarios := []struct {
		name     string
		s3Client *s3.S3
		expected string
	}{
		{
			"no schema",
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "example.com/",
				AccessKey: "123",
				SecretKey: "abc",
			},
			"https://test_bucket.example.com/test_key/a/b/c?q=1",
		},
		{
			"with https schema",
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "https://example.com/",
				AccessKey: "123",
				SecretKey: "abc",
			},
			"https://test_bucket.example.com/test_key/a/b/c?q=1",
		},
		{
			"with http schema",
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "http://example.com/",
				AccessKey: "123",
				SecretKey: "abc",
			},
			"http://test_bucket.example.com/test_key/a/b/c?q=1",
		},
		{
			"path style addressing (non-explicit schema)",
			&s3.S3{
				Region:       "test_region",
				Bucket:       "test_bucket",
				Endpoint:     "example.com/",
				AccessKey:    "123",
				SecretKey:    "abc",
				UsePathStyle: true,
			},
			"https://example.com/test_bucket/test_key/a/b/c?q=1",
		},
		{
			"path style addressing (explicit schema)",
			&s3.S3{
				Region:       "test_region",
				Bucket:       "test_bucket",
				Endpoint:     "http://example.com/",
				AccessKey:    "123",
				SecretKey:    "abc",
				UsePathStyle: true,
			},
			"http://example.com/test_bucket/test_key/a/b/c?q=1",
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			result := s.s3Client.URL("/test_key/a/b/c?q=1")
			if result != s.expected {
				t.Fatalf("Expected URL\n%s\ngot\n%s", s.expected, result)
			}
		})
	}
}
|
||||
|
||||
// TestS3SignAndSend verifies the SigV4 signing end-to-end against
// precomputed Authorization signatures for several client configurations,
// using the stub HTTP client to capture and match the outgoing request.
func TestS3SignAndSend(t *testing.T) {
	t.Parallel()

	// fresh response per scenario since the body reader is consumed
	testResponse := func() *http.Response {
		return &http.Response{
			Body: io.NopCloser(strings.NewReader("test_response")),
		}
	}

	scenarios := []struct {
		name     string
		path     string
		reqFunc  func(req *http.Request)
		s3Client *s3.S3
	}{
		{
			"minimal",
			"/test",
			func(req *http.Request) {
				// pin the date so the expected signature is deterministic
				req.Header.Set("x-amz-date", "20250102T150405Z")
			},
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "https://example.com/",
				AccessKey: "123",
				SecretKey: "abc",
				Client: tests.NewClient(&tests.RequestStub{
					Method:   http.MethodGet,
					URL:      "https://test_bucket.example.com/test",
					Response: testResponse(),
					Match: func(req *http.Request) bool {
						return tests.ExpectHeaders(req.Header, map[string]string{
							"Authorization":        "AWS4-HMAC-SHA256 Credential=123/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=ea093662bc1deef08dfb4ac35453dfaad5ea89edf102e9dd3b7156c9a27e4c1f",
							"Host":                 "test_bucket.example.com",
							"X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD",
							"X-Amz-Date":           "20250102T150405Z",
						})
					},
				}),
			},
		},
		{
			"minimal with different access and secret keys",
			"/test",
			func(req *http.Request) {
				req.Header.Set("x-amz-date", "20250102T150405Z")
			},
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "https://example.com/",
				AccessKey: "456",
				SecretKey: "def",
				Client: tests.NewClient(&tests.RequestStub{
					Method:   http.MethodGet,
					URL:      "https://test_bucket.example.com/test",
					Response: testResponse(),
					Match: func(req *http.Request) bool {
						return tests.ExpectHeaders(req.Header, map[string]string{
							"Authorization":        "AWS4-HMAC-SHA256 Credential=456/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=17510fa1f724403dd0a563b61c9b31d1d718f877fcbd75455620d17a8afce5fb",
							"Host":                 "test_bucket.example.com",
							"X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD",
							"X-Amz-Date":           "20250102T150405Z",
						})
					},
				}),
			},
		},
		{
			"minimal with special characters",
			"/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~!@#$^&*()=/@sub?a=1&@b=@2",
			func(req *http.Request) {
				req.Header.Set("x-amz-date", "20250102T150405Z")
			},
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "https://example.com/",
				AccessKey: "456",
				SecretKey: "def",
				Client: tests.NewClient(&tests.RequestStub{
					Method:   http.MethodGet,
					URL:      "https://test_bucket.example.com/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~!@#$%5E&*()=/@sub?a=1&@b=@2",
					Response: testResponse(),
					Match: func(req *http.Request) bool {
						return tests.ExpectHeaders(req.Header, map[string]string{
							"Authorization":        "AWS4-HMAC-SHA256 Credential=456/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=e0001982deef1652704f74503203e77d83d4c88369421f9fca644d96f2a62a3c",
							"Host":                 "test_bucket.example.com",
							"X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD",
							"X-Amz-Date":           "20250102T150405Z",
						})
					},
				}),
			},
		},
		{
			"with extra headers",
			"/test",
			func(req *http.Request) {
				req.Header.Set("x-amz-date", "20250102T150405Z")
				req.Header.Set("x-amz-content-sha256", "test_sha256")
				req.Header.Set("x-amz-example", "123")
				req.Header.Set("x-amz-meta-a", "456")
				req.Header.Set("content-type", "image/png")
				req.Header.Set("x-test", "789") // shouldn't be included in the signing headers
			},
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "https://example.com/",
				AccessKey: "123",
				SecretKey: "abc",
				Client: tests.NewClient(&tests.RequestStub{
					Method:   http.MethodGet,
					URL:      "https://test_bucket.example.com/test",
					Response: testResponse(),
					Match: func(req *http.Request) bool {
						return tests.ExpectHeaders(req.Header, map[string]string{
							"authorization":        "AWS4-HMAC-SHA256 Credential=123/20250102/test_region/s3/aws4_request, SignedHeaders=content-type;host;x-amz-content-sha256;x-amz-date;x-amz-example;x-amz-meta-a, Signature=86dccbcd012c33073dc99e9d0a9e0b717a4d8c11c37848cfa9a4a02716bc0db3",
							"host":                 "test_bucket.example.com",
							"x-amz-date":           "20250102T150405Z",
							"x-amz-content-sha256": "test_sha256",
							"x-amz-example":        "123",
							"x-amz-meta-a":         "456",
							"x-test":               "789",
						})
					},
				}),
			},
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodGet, s.s3Client.URL(s.path), strings.NewReader("test_request"))
			if err != nil {
				t.Fatal(err)
			}

			if s.reqFunc != nil {
				s.reqFunc(req)
			}

			resp, err := s.s3Client.SignAndSend(req)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()

			// all queued stubs should have been consumed
			err = s.s3Client.Client.(*tests.Client).AssertNoRemaining()
			if err != nil {
				t.Fatal(err)
			}

			expectedBody := "test_response"

			body, err := io.ReadAll(resp.Body)
			if err != nil {
				t.Fatal(err)
			}
			if str := string(body); str != expectedBody {
				t.Fatalf("Expected body %q, got %q", expectedBody, str)
			}
		})
	}
}
|
111
tools/filesystem/internal/s3blob/s3/tests/client.go
Normal file
111
tools/filesystem/internal/s3blob/s3/tests/client.go
Normal file
|
@ -0,0 +1,111 @@
|
|||
// Package tests contains various tests helpers and utilities to assist
|
||||
// with the S3 client testing.
|
||||
package tests
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// NewClient creates a new test Client loaded with the specified RequestStubs.
|
||||
func NewClient(stubs ...*RequestStub) *Client {
|
||||
return &Client{stubs: stubs}
|
||||
}
|
||||
|
||||
type RequestStub struct {
|
||||
Method string
|
||||
URL string // plain string or regex pattern wrapped in "^pattern$"
|
||||
Match func(req *http.Request) bool
|
||||
Response *http.Response
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
stubs []*RequestStub
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// AssertNoRemaining asserts that current client has no unprocessed requests remaining.
|
||||
func (c *Client) AssertNoRemaining() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if len(c.stubs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
msgParts := make([]string, 0, len(c.stubs)+1)
|
||||
msgParts = append(msgParts, "not all stub requests were processed:")
|
||||
for _, stub := range c.stubs {
|
||||
msgParts = append(msgParts, "- "+stub.Method+" "+stub.URL)
|
||||
}
|
||||
|
||||
return errors.New(strings.Join(msgParts, "\n"))
|
||||
}
|
||||
|
||||
// Do implements the [s3.HTTPClient] interface.
|
||||
func (c *Client) Do(req *http.Request) (*http.Response, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
for i, stub := range c.stubs {
|
||||
if req.Method != stub.Method {
|
||||
continue
|
||||
}
|
||||
|
||||
urlPattern := stub.URL
|
||||
if !strings.HasPrefix(urlPattern, "^") && !strings.HasSuffix(urlPattern, "$") {
|
||||
urlPattern = "^" + regexp.QuoteMeta(urlPattern) + "$"
|
||||
}
|
||||
|
||||
urlRegex, err := regexp.Compile(urlPattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !urlRegex.MatchString(req.URL.String()) {
|
||||
continue
|
||||
}
|
||||
|
||||
if stub.Match != nil && !stub.Match(req) {
|
||||
continue
|
||||
}
|
||||
|
||||
// remove from the remaining stubs
|
||||
c.stubs = slices.Delete(c.stubs, i, i+1)
|
||||
|
||||
response := stub.Response
|
||||
if response == nil {
|
||||
response = &http.Response{}
|
||||
}
|
||||
if response.Header == nil {
|
||||
response.Header = http.Header{}
|
||||
}
|
||||
if response.Body == nil {
|
||||
response.Body = http.NoBody
|
||||
}
|
||||
|
||||
response.Request = req
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
var body []byte
|
||||
if req.Body != nil {
|
||||
defer req.Body.Close()
|
||||
body, _ = io.ReadAll(req.Body)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf(
|
||||
"the below request doesn't have a corresponding stub:\n%s %s\nHeaders: %v\nBody: %q",
|
||||
req.Method,
|
||||
req.URL.String(),
|
||||
req.Header,
|
||||
body,
|
||||
)
|
||||
}
|
33
tools/filesystem/internal/s3blob/s3/tests/headers.go
Normal file
33
tools/filesystem/internal/s3blob/s3/tests/headers.go
Normal file
|
@ -0,0 +1,33 @@
|
|||
package tests
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ExpectHeaders checks whether specified headers match the expectations.
|
||||
// The expectations map entry key is the header name.
|
||||
// The expectations map entry value is the first header value. If wrapped with `^...$`
|
||||
// it is compared as regular expression.
|
||||
func ExpectHeaders(headers http.Header, expectations map[string]string) bool {
|
||||
for h, expected := range expectations {
|
||||
v := headers.Get(h)
|
||||
|
||||
pattern := expected
|
||||
if !strings.HasPrefix(pattern, "^") && !strings.HasSuffix(pattern, "$") {
|
||||
pattern = "^" + regexp.QuoteMeta(pattern) + "$"
|
||||
}
|
||||
|
||||
expectedRegex, err := regexp.Compile(pattern)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if !expectedRegex.MatchString(v) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
414
tools/filesystem/internal/s3blob/s3/uploader.go
Normal file
414
tools/filesystem/internal/s3blob/s3/uploader.go
Normal file
|
@ -0,0 +1,414 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// ErrUsedUploader is returned when attempting to reuse an already consumed Uploader instance.
var ErrUsedUploader = errors.New("the Uploader has been already used")

const (
	// defaultMaxConcurrency is the default number of parallel part upload workers.
	defaultMaxConcurrency int = 5

	// defaultMinPartSize is the default minimum payload size (~6MB)
	// above which a multipart upload is performed.
	defaultMinPartSize int = 6 << 20
)
||||
|
||||
// Uploader handles the upload of a single S3 object.
//
// If the Payload size is less than the configured MinPartSize it sends
// a single (PutObject) request, otherwise performs chunked/multipart upload.
type Uploader struct {
	// S3 is the S3 client instance performing the upload object request (required).
	S3 *S3

	// Payload is the object content to upload (required).
	Payload io.Reader

	// Key is the destination key of the uploaded object (required).
	Key string

	// Metadata specifies the optional metadata to write with the object upload.
	Metadata map[string]string

	// MaxConcurrency specifies the max number of workers to use when
	// performing chunked/multipart upload.
	//
	// If zero or negative, defaults to 5.
	//
	// This option is used only when the Payload size is > MinPartSize.
	MaxConcurrency int

	// MinPartSize specifies the min Payload size required to perform
	// chunked/multipart upload.
	//
	// If zero or negative, defaults to ~6MB.
	MinPartSize int

	// uploadId is the id of the current multipart upload session.
	uploadId string
	// uploadedParts accumulates the metadata of the successfully uploaded parts.
	uploadedParts []*mpPart
	// lastPartNumber tracks the most recently assigned part number.
	lastPartNumber int
	mu             sync.Mutex // guards lastPartNumber and the uploadedParts slice
	// used marks the instance as consumed (the Uploader is single-use).
	used bool
}
|
||||
|
||||
// Upload processes the current Uploader instance.
|
||||
//
|
||||
// Users can specify an optional optReqFuncs that will be passed down to all Upload internal requests
|
||||
// (single upload, multipart init, multipart parts upload, multipart complete, multipart abort).
|
||||
//
|
||||
// Note that after this call the Uploader should be discarded (aka. no longer can be used).
|
||||
func (u *Uploader) Upload(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
|
||||
if u.used {
|
||||
return ErrUsedUploader
|
||||
}
|
||||
|
||||
err := u.validateAndNormalize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
initPart, _, err := u.nextPart()
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(initPart) < u.MinPartSize {
|
||||
return u.singleUpload(ctx, initPart, optReqFuncs...)
|
||||
}
|
||||
|
||||
err = u.multipartInit(ctx, optReqFuncs...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("multipart init error: %w", err)
|
||||
}
|
||||
|
||||
err = u.multipartUpload(ctx, initPart, optReqFuncs...)
|
||||
if err != nil {
|
||||
return errors.Join(
|
||||
u.multipartAbort(ctx, optReqFuncs...),
|
||||
fmt.Errorf("multipart upload error: %w", err),
|
||||
)
|
||||
}
|
||||
|
||||
err = u.multipartComplete(ctx, optReqFuncs...)
|
||||
if err != nil {
|
||||
return errors.Join(
|
||||
u.multipartAbort(ctx, optReqFuncs...),
|
||||
fmt.Errorf("multipart complete error: %w", err),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
func (u *Uploader) validateAndNormalize() error {
|
||||
if u.S3 == nil {
|
||||
return errors.New("Uploader.S3 must be a non-empty and properly initialized S3 client instance")
|
||||
}
|
||||
|
||||
if u.Key == "" {
|
||||
return errors.New("Uploader.Key is required")
|
||||
}
|
||||
|
||||
if u.Payload == nil {
|
||||
return errors.New("Uploader.Payload must be non-nill")
|
||||
}
|
||||
|
||||
if u.MaxConcurrency <= 0 {
|
||||
u.MaxConcurrency = defaultMaxConcurrency
|
||||
}
|
||||
|
||||
if u.MinPartSize <= 0 {
|
||||
u.MinPartSize = defaultMinPartSize
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Uploader) singleUpload(ctx context.Context, part []byte, optReqFuncs ...func(*http.Request)) error {
|
||||
if u.used {
|
||||
return ErrUsedUploader
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.S3.URL(u.Key), bytes.NewReader(part))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Length", strconv.Itoa(len(part)))
|
||||
|
||||
for k, v := range u.Metadata {
|
||||
req.Header.Set(metadataPrefix+k, v)
|
||||
}
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optReqFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := u.S3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// mpPart represents a single completed part of a multipart upload,
// as serialized in the CompleteMultipartUpload request body.
type mpPart struct {
	XMLName    xml.Name `xml:"Part"`
	ETag       string   `xml:"ETag"`
	PartNumber int      `xml:"PartNumber"`
}
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
|
||||
func (u *Uploader) multipartInit(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
|
||||
if u.used {
|
||||
return ErrUsedUploader
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.S3.URL(u.Key+"?uploads"), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, v := range u.Metadata {
|
||||
req.Header.Set(metadataPrefix+k, v)
|
||||
}
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optReqFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := u.S3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body := &struct {
|
||||
XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
|
||||
UploadId string `xml:"UploadId"`
|
||||
}{}
|
||||
|
||||
err = xml.NewDecoder(resp.Body).Decode(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
u.uploadId = body.UploadId
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
|
||||
func (u *Uploader) multipartAbort(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
|
||||
u.mu.Lock()
|
||||
defer u.mu.Unlock()
|
||||
|
||||
u.used = true
|
||||
|
||||
// ensure that the specified abort context is always valid to allow cleanup
|
||||
var abortCtx = ctx
|
||||
if abortCtx.Err() != nil {
|
||||
abortCtx = context.Background()
|
||||
}
|
||||
|
||||
query := url.Values{"uploadId": []string{u.uploadId}}
|
||||
|
||||
req, err := http.NewRequestWithContext(abortCtx, http.MethodDelete, u.S3.URL(u.Key+"?"+query.Encode()), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optReqFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := u.S3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
|
||||
func (u *Uploader) multipartComplete(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
|
||||
u.mu.Lock()
|
||||
defer u.mu.Unlock()
|
||||
|
||||
u.used = true
|
||||
|
||||
// the list of parts must be sorted in ascending order
|
||||
slices.SortFunc(u.uploadedParts, func(a, b *mpPart) int {
|
||||
if a.PartNumber < b.PartNumber {
|
||||
return -1
|
||||
}
|
||||
if a.PartNumber > b.PartNumber {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
})
|
||||
|
||||
// build a request payload with the uploaded parts
|
||||
xmlParts := &struct {
|
||||
XMLName xml.Name `xml:"CompleteMultipartUpload"`
|
||||
Parts []*mpPart
|
||||
}{
|
||||
Parts: u.uploadedParts,
|
||||
}
|
||||
rawXMLParts, err := xml.Marshal(xmlParts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reqPayload := strings.NewReader(xml.Header + string(rawXMLParts))
|
||||
|
||||
query := url.Values{"uploadId": []string{u.uploadId}}
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.S3.URL(u.Key+"?"+query.Encode()), reqPayload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optReqFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := u.S3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Uploader) nextPart() ([]byte, int, error) {
|
||||
u.mu.Lock()
|
||||
defer u.mu.Unlock()
|
||||
|
||||
part := make([]byte, u.MinPartSize)
|
||||
n, err := io.ReadFull(u.Payload, part)
|
||||
|
||||
// normalize io.EOF errors and ensure that io.EOF is returned only when there were no read bytes
|
||||
if err != nil && (errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
|
||||
if n == 0 {
|
||||
err = io.EOF
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
|
||||
u.lastPartNumber++
|
||||
|
||||
return part[0:n], u.lastPartNumber, err
|
||||
}
|
||||
|
||||
// multipartUpload uploads the payload as parts using up to
// u.MaxConcurrency parallel workers.
//
// initPart, if non-empty, is a chunk that was already read from the
// payload by the caller and is uploaded under the current
// u.lastPartNumber; the remaining workers pull further chunks via
// u.nextPart until io.EOF.
//
// Successfully uploaded parts are appended (mutex-guarded) to
// u.uploadedParts for the later CompleteMultipartUpload request.
// The first worker error aborts g.Wait with that error.
//
// NOTE(review): errgroup.Group without WithContext means remaining
// workers keep consuming parts after a failure until the payload is
// drained — presumably acceptable since the caller aborts the upload on
// error; confirm against the caller.
func (u *Uploader) multipartUpload(ctx context.Context, initPart []byte, optReqFuncs ...func(*http.Request)) error {
	var g errgroup.Group
	g.SetLimit(u.MaxConcurrency)

	totalParallel := u.MaxConcurrency

	if len(initPart) != 0 {
		// one worker slot is taken by the already-read initial part
		totalParallel--
		initPartNumber := u.lastPartNumber
		g.Go(func() error {
			mp, err := u.uploadPart(ctx, initPartNumber, initPart, optReqFuncs...)
			if err != nil {
				return err
			}

			u.mu.Lock()
			u.uploadedParts = append(u.uploadedParts, mp)
			u.mu.Unlock()

			return nil
		})
	}

	for i := 0; i < totalParallel; i++ {
		g.Go(func() error {
			for {
				// nextPart is internally synchronized, so concurrent workers
				// receive disjoint chunks and unique part numbers
				part, num, err := u.nextPart()
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					}
					return err
				}

				mp, err := u.uploadPart(ctx, num, part, optReqFuncs...)
				if err != nil {
					return err
				}

				u.mu.Lock()
				u.uploadedParts = append(u.uploadedParts, mp)
				u.mu.Unlock()
			}

			return nil
		})
	}

	return g.Wait()
}
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
|
||||
func (u *Uploader) uploadPart(ctx context.Context, partNumber int, partData []byte, optReqFuncs ...func(*http.Request)) (*mpPart, error) {
|
||||
query := url.Values{}
|
||||
query.Set("uploadId", u.uploadId)
|
||||
query.Set("partNumber", strconv.Itoa(partNumber))
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.S3.URL(u.Key+"?"+query.Encode()), bytes.NewReader(partData))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Length", strconv.Itoa(len(partData)))
|
||||
|
||||
// apply optional request funcs
|
||||
for _, fn := range optReqFuncs {
|
||||
if fn != nil {
|
||||
fn(req)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := u.S3.SignAndSend(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return &mpPart{
|
||||
PartNumber: partNumber,
|
||||
ETag: resp.Header.Get("ETag"),
|
||||
}, nil
|
||||
}
|
463
tools/filesystem/internal/s3blob/s3/uploader_test.go
Normal file
463
tools/filesystem/internal/s3blob/s3/uploader_test.go
Normal file
|
@ -0,0 +1,463 @@
|
|||
package s3_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
|
||||
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
|
||||
)
|
||||
|
||||
// TestUploaderRequiredFields verifies that Uploader.Upload fails when any of
// the required fields (S3, Key, Payload) is missing and succeeds when all are set.
func TestUploaderRequiredFields(t *testing.T) {
	t.Parallel()

	s3Client := &s3.S3{
		Client:    tests.NewClient(&tests.RequestStub{Method: "PUT", URL: `^.+$`}), // match every upload
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	payload := strings.NewReader("test")

	// each scenario omits a different required field
	scenarios := []struct {
		name          string
		uploader      *s3.Uploader
		expectedError bool
	}{
		{
			"blank",
			&s3.Uploader{},
			true,
		},
		{
			"no Key",
			&s3.Uploader{S3: s3Client, Payload: payload},
			true,
		},
		{
			"no S3",
			&s3.Uploader{Key: "abc", Payload: payload},
			true,
		},
		{
			"no Payload",
			&s3.Uploader{S3: s3Client, Key: "abc"},
			true,
		},
		{
			"with S3, Key and Payload",
			&s3.Uploader{S3: s3Client, Key: "abc", Payload: payload},
			false,
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			err := s.uploader.Upload(context.Background())

			hasErr := err != nil
			if hasErr != s.expectedError {
				t.Fatalf("Expected hasErr %v, got %v", s.expectedError, hasErr)
			}
		})
	}
}
|
||||
|
||||
// TestUploaderSingleUpload verifies that a payload smaller than MinPartSize
// is sent as one plain PUT request with the expected body, metadata headers
// and the optional request modifier applied.
func TestUploaderSingleUpload(t *testing.T) {
	t.Parallel()

	// expect exactly one signed PUT with the full payload
	httpClient := tests.NewClient(
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}

				return string(body) == "abcdefg" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "7",
					"x-amz-meta-a":   "123",
					"x-amz-meta-b":   "456",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:      "test_key",
		Payload:  strings.NewReader("abcdefg"),
		Metadata: map[string]string{"a": "123", "b": "456"},
		// payload (7 bytes) fits in a single part
		MinPartSize: 8,
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
|
||||
|
||||
// TestUploaderMultipartUploadSuccess verifies the happy-path multipart flow:
// CreateMultipartUpload, three UploadPart requests ("abc", "def", "g") and a
// CompleteMultipartUpload whose payload lists the parts in ascending order.
func TestUploaderMultipartUploadSuccess(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(
		// CreateMultipartUpload
		&tests.RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploads",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"x-amz-meta-a":  "123",
					"x-amz-meta-b":  "456",
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<?xml version="1.0" encoding="UTF-8"?>
					<InitiateMultipartUploadResult>
					   <Bucket>test_bucket</Bucket>
					   <Key>test_key</Key>
					   <UploadId>test_id</UploadId>
					</InitiateMultipartUploadResult>
				`)),
			},
		},
		// UploadPart 1 ("abc")
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}

				return string(body) == "abc" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag1"}},
			},
		},
		// UploadPart 2 ("def")
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}

				return string(body) == "def" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag2"}},
			},
		},
		// UploadPart 3 (the short trailing "g")
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=3&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "g" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "1",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag3"}},
			},
		},
		// CompleteMultipartUpload with the parts sorted ascending
		&tests.RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}

				expected := `<CompleteMultipartUpload><Part><ETag>etag1</ETag><PartNumber>1</PartNumber></Part><Part><ETag>etag2</ETag><PartNumber>2</PartNumber></Part><Part><ETag>etag3</ETag><PartNumber>3</PartNumber></Part></CompleteMultipartUpload>`

				return strings.Contains(string(body), expected) && tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:      "test_key",
		Payload:  strings.NewReader("abcdefg"),
		Metadata: map[string]string{"a": "123", "b": "456"},
		// 7-byte payload with 3-byte parts forces the multipart path
		MinPartSize: 3,
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
|
||||
|
||||
// TestUploaderMultipartUploadPartFailure verifies that a failing UploadPart
// request (HTTP 400 on part 2) makes Upload return an error and triggers an
// AbortMultipartUpload (DELETE) for cleanup.
func TestUploaderMultipartUploadPartFailure(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(
		// CreateMultipartUpload
		&tests.RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploads",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"x-amz-meta-a":  "123",
					"x-amz-meta-b":  "456",
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<?xml version="1.0" encoding="UTF-8"?>
					<InitiateMultipartUploadResult>
					   <Bucket>test_bucket</Bucket>
					   <Key>test_key</Key>
					   <UploadId>test_id</UploadId>
					</InitiateMultipartUploadResult>
				`)),
			},
		},
		// UploadPart 1 succeeds
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "abc" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag1"}},
			},
		},
		// UploadPart 2 fails with a 400 response
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				StatusCode: 400,
			},
		},
		// the failure must be followed by an AbortMultipartUpload
		&tests.RequestStub{
			Method: http.MethodDelete,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:      "test_key",
		Payload:  strings.NewReader("abcdefg"),
		Metadata: map[string]string{"a": "123", "b": "456"},
		// force the multipart path
		MinPartSize: 3,
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err == nil {
		t.Fatal("Expected non-nil error")
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
|
||||
|
||||
// TestUploaderMultipartUploadCompleteFailure verifies that a failing
// CompleteMultipartUpload request (HTTP 400) makes Upload return an error
// and triggers an AbortMultipartUpload (DELETE) for cleanup.
func TestUploaderMultipartUploadCompleteFailure(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(
		// CreateMultipartUpload
		&tests.RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploads",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"x-amz-meta-a":  "123",
					"x-amz-meta-b":  "456",
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<?xml version="1.0" encoding="UTF-8"?>
					<InitiateMultipartUploadResult>
					   <Bucket>test_bucket</Bucket>
					   <Key>test_key</Key>
					   <UploadId>test_id</UploadId>
					</InitiateMultipartUploadResult>
				`)),
			},
		},
		// UploadPart 1 succeeds
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "abc" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag1"}},
			},
		},
		// UploadPart 2 succeeds
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "def" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag2"}},
			},
		},
		// CompleteMultipartUpload fails with a 400 response
		&tests.RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				StatusCode: 400,
			},
		},
		// the failure must be followed by an AbortMultipartUpload
		&tests.RequestStub{
			Method: http.MethodDelete,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:      "test_key",
		Payload:  strings.NewReader("abcdef"),
		Metadata: map[string]string{"a": "123", "b": "456"},
		// force the multipart path (exactly two 3-byte parts)
		MinPartSize: 3,
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err == nil {
		t.Fatal("Expected non-nil error")
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
|
Loading…
Add table
Add a link
Reference in a new issue